From c24abefcf34ebfd96eeb82c11f6bb437dce8b9cc Mon Sep 17 00:00:00 2001
From: sugarme
Date: Sun, 13 Mar 2022 12:56:11 +1100
Subject: [PATCH] upgraded to libtorch-1.11; requires CUDA 11.3

---
 README.md                             |      4 +-
 gen/gen.ml                            |      8 +-
 gen/gen.ml.1.10                       |   1351 +
 gen/pytorch/Declarations-v1.11.0.yaml | 139252 +++++++++++++++++++++++
 libtch/c-generated.go                 |   1058 +-
 libtch/torch_api_generated.cpp.h      |    967 +-
 libtch/torch_api_generated.h          |    179 +-
 setup-gotch.sh                        |      2 +-
 setup-libtorch.sh                     |      4 +-
 ts/must-tensor-generated.go           |  32996 +++---
 ts/tensor-generated.go                |  54803 +++++----
 11 files changed, 182979 insertions(+), 47645 deletions(-)
 create mode 100644 gen/gen.ml.1.10
 create mode 100644 gen/pytorch/Declarations-v1.11.0.yaml

diff --git a/README.md b/README.md
index d6c051d..2d79a9b 100644
--- a/README.md
+++ b/README.md
@@ -3,10 +3,10 @@
 ## Overview
 
-`gotch` creates a thin wrapper to Pytorch C++ APIs (Libtorch) to make use of its already optimized C++ tensor APIs (~ 2169) and dynamic graph computation with CUDA support and provides idiomatic Go APIs for developing and implementing Deep Learning in Go.
+`gotch` is a thin wrapper around the Pytorch C++ API (Libtorch). It reuses Libtorch's already optimized C++ tensor APIs (~ 2209) and its dynamic graph computation with CUDA support, and provides idiomatic Go APIs for developing and implementing Deep Learning in Go.
 
 **Some features are**
 
-- [x] Comprehensive Pytorch tensor APIs (~ 2169)
+- [x] Comprehensive Pytorch tensor APIs (~ 1893)
 - [x] Fully featured Pytorch dynamic graph computation
 - [x] JIT interface to run model trained/saved using PyTorch Python API
 - [x] Load pretrained Pytorch models and run inference
diff --git a/gen/gen.ml b/gen/gen.ml
index 06e6cce..ddc3ed6 100644
--- a/gen/gen.ml
+++ b/gen/gen.ml
@@ -41,7 +41,10 @@ let excluded_functions =
     ; "linalg_vector_norm"
     ; "linalg_vector_norm_out"
     ; "linalg_matrix_norm"
-    ; "linalg_matrix_norm_out"]
+    ; "linalg_matrix_norm_out"
+    ; "_histogramdd_bin_edges"
+    ; "_histogramdd_from_bin_cts"
+    ; "_linalg_check_errors"]
 
 let no_tensor_options =
   Set.of_list
@@ -1344,7 +1347,7 @@ let run ~yaml_filename ~cpp_filename ~ffi_filename ~must_wrapper_filename
   write_wrapper funcs wrapper_filename
 
 let () =
-  run ~yaml_filename:"gen/pytorch/Declarations-v1.10.0.yaml"
+  run ~yaml_filename:"gen/pytorch/Declarations-v1.11.0.yaml"
     ~cpp_filename:"libtch/torch_api_generated"
     ~ffi_filename:"libtch/c-generated.go"
     ~must_wrapper_filename:"ts/must-tensor-generated.go"
diff --git a/gen/gen.ml.1.10 b/gen/gen.ml.1.10
new file mode 100644
index 0000000..06e6cce
--- /dev/null
+++ b/gen/gen.ml.1.10
@@ -0,0 +1,1351 @@
+(* Automatically generated C++ -> C -> Go bindings.
+   Input: Declarations-VERSION.yaml artifact generated when building Pytorch from source.
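+   Outputs (see `run` at the bottom of this file): C shims in
+   libtch/torch_api_generated.cpp.h / libtch/torch_api_generated.h, cgo stubs
+   in libtch/c-generated.go, and Go wrappers in ts/tensor-generated.go and
+   ts/must-tensor-generated.go.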
+ Run with: dune exec gen/gen.exe +*) +open Base +open Stdio + +let excluded_functions = + Set.of_list + (module String) + [ "multi_margin_loss" + ; "multi_margin_loss_out" + ; "log_softmax_backward_data" + ; "softmax_backward_data" + ; "clone" + ; "copy_" + ; "conv_transpose2d_backward_out" + ; "conv_transpose3d_backward_out" + ; "slow_conv_transpose2d_backward_out" + ; "slow_conv_transpose3d_backward_out" + ; "slow_conv3d_backward_out" + ; "normal" + ; "_cufft_set_plan_cache_max_size" + ; "_cufft_clear_plan_cache" + ; "backward" + ; "set_data" + ; "_amp_non_finite_check_and_unscale_" + ; "_amp_foreach_non_finite_check_and_unscale_" + ; "_cummin_helper" + ; "_cummax_helper" + ; "retain_grad" + ; "_validate_sparse_coo_tensor_args" + ; "_validate_sparse_csr_tensor_args" + ; "_backward" + ; "size" + ; "stride" + ; "histogram_out" + ; "histogram" + ; "_assert_async" + ; "gradient" + ; "linalg_vector_norm" + ; "linalg_vector_norm_out" + ; "linalg_matrix_norm" + ; "linalg_matrix_norm_out"] + +let no_tensor_options = + Set.of_list + (module String) + [ "zeros_like" + ; "empty_like" + ; "full_like" + ; "ones_like" + ; "rand_like" + ; "randint_like" + ; "randn_like" ] + +(* + * let prefixed_functions = + * Set.of_list + * (module String) + * ["add"; "add_"; "div"; "div_"; "mul"; "mul_"; "sub"; "sub_"; "nll_loss"] + * *) +let excluded_prefixes = ["_thnn_"; "_th_"; "thnn_"; "th_"; "_foreach"] + +let excluded_suffixes = ["_forward"; "_forward_out"] + +let yaml_error yaml ~msg = + Printf.failwithf "%s, %s" msg (Yaml.to_string_exn yaml) () + +let extract_bool = function + | `Bool b -> b + | `String "true" -> true + | `String "false" -> false + | yaml -> yaml_error yaml ~msg:"expected bool" + +let extract_list = function + | `A l -> l + | yaml -> yaml_error yaml ~msg:"expected list" + +let extract_map = function + | `O map -> Map.of_alist_exn (module String) map + | yaml -> yaml_error yaml ~msg:"expected map" + +let extract_string = function + | `String s -> s + (* The yaml spec for torch uses n which is converted to a bool. 
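+     (e.g. the Yaml parser reads a bare `n` as `Bool false`, so the `Bool`
+     case below maps it back to the one-letter string used by the schema.)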
*) + | `Bool b -> if b then "y" else "n" + | `Float f -> Float.to_string f + | yaml -> yaml_error yaml ~msg:"expected string" + +module Func = struct + type arg_type = + | Bool + | Int64 + | Int64Option + | Double + | DoubleOption + | Tensor + | TensorOption + (* Tensor.t option *) + | IntList + | TensorOptList + | TensorList + | TensorOptions + (* Tensor kind and device *) + | Scalar + | ScalarType + | Device + | String + + type arg = + {arg_name: string; arg_type: arg_type; default_value: string option} + + (* `Func` type *) + type t = + { name: string + ; operator_name: string + ; overload_name: string + ; args: arg list (* ; returns: [`fixed of int | `dynamic] *) + ; returns: [`fixed of int | `dynamic | `bool | `int64_t | `double] + ; (* number of tensors that are returned *) + kind: [`function_ | `method_] } + + let arg_type_of_string str ~is_nullable = + match String.lowercase str with + | "bool" -> Some Bool + | "int64_t" -> Some (if is_nullable then Int64Option else Int64) + | "double" -> Some (if is_nullable then DoubleOption else Double) + | "at::tensor" -> Some (if is_nullable then TensorOption else Tensor) + | "at::tensoroptions" -> Some TensorOptions + | "at::intarrayref" -> Some IntList + | "const c10::list> &" -> Some TensorOptList + | "at::tensorlist" -> Some TensorList + | "at::device" -> Some Device + | "const at::scalar &" | "at::scalar" -> Some Scalar + | "at::scalartype" -> Some ScalarType + | "c10::string_view" -> Some String + | _ -> None + + let c_typed_args_list t = + List.map t.args ~f:(fun {arg_name; arg_type; _} -> + match arg_type with + | IntList -> + Printf.sprintf "int64_t *%s_data, int %s_len" arg_name arg_name + | TensorOptList | TensorList -> + Printf.sprintf "tensor *%s_data, int %s_len" arg_name arg_name + | TensorOptions -> + Printf.sprintf "int %s_kind, int %s_device" arg_name arg_name + | String -> Printf.sprintf "char* %s_ptr, int %s_len" arg_name arg_name + | Int64Option -> + Printf.sprintf "int64_t %s_v, uint8_t %s_null" arg_name arg_name + | DoubleOption -> + Printf.sprintf "double %s_v, uint8_t %s_null" arg_name arg_name + | otherwise -> + let simple_type_cstring = + match otherwise with + | Bool -> "int" + | Int64 -> "int64_t" + | Double -> "double" + | Tensor -> "tensor" + | TensorOption -> "tensor" + | ScalarType -> "int" + | Device -> "int" + | Scalar -> "scalar" + | Int64Option | DoubleOption | String | IntList | TensorOptList + |TensorList | TensorOptions -> + assert false + in + Printf.sprintf "%s %s" simple_type_cstring arg_name ) + |> String.concat ~sep:", " + + let c_args_list args = + List.map args ~f:(fun {arg_name; arg_type; _} -> + match arg_type with + | Scalar | Tensor -> "*" ^ arg_name + | TensorOption -> + Printf.sprintf "(%s ? *%s : torch::Tensor())" arg_name arg_name + | Bool -> "(bool)" ^ arg_name + | IntList -> + Printf.sprintf "torch::IntArrayRef(%s_data, %s_len)" arg_name + arg_name + | String -> + Printf.sprintf "std::string(%s_ptr, %s_len)" arg_name arg_name + | TensorOptList -> + Printf.sprintf "of_carray_tensor_opt(%s_data, %s_len)" arg_name + arg_name + | TensorList -> + Printf.sprintf "of_carray_tensor(%s_data, %s_len)" arg_name + arg_name + | TensorOptions -> + Printf.sprintf + "at::device(device_of_int(%s_device)).dtype(at::ScalarType(%s_kind))" + arg_name arg_name + | Int64Option -> + Printf.sprintf + "%s_null ? c10::nullopt : c10::optional(%s_v)" arg_name + arg_name + | DoubleOption -> + Printf.sprintf + "%s_null ? 
c10::nullopt : c10::optional(%s_v)" arg_name + arg_name + | ScalarType -> Printf.sprintf "at::ScalarType(%s)" arg_name + | Device -> Printf.sprintf "device_of_int(%s)" arg_name + | _ -> arg_name ) + |> String.concat ~sep:", " + + let c_call t = + match t.kind with + | `function_ -> Printf.sprintf "torch::%s(%s)" t.name (c_args_list t.args) + | `method_ -> ( + match t.args with + | head :: tail -> + Printf.sprintf "%s->%s(%s)" head.arg_name t.name (c_args_list tail) + | [] -> + Printf.failwithf "Method calls should have at least one argument %s" + t.name () ) + + (* + let replace_map = + Map.of_alist_exn + (module String) + [ ("t", "tr") + ; ("where", "where_") + ; ("view", "view_") + ; ("unsafe", "unsafe_") + ; ("to_device", "to_device_") ] + *) + + let is_method t = + List.exists t.args ~f:(fun arg -> + match arg.arg_name with "self" -> true | _ -> false ) + + let go_name name = + let last_underscore name = Str.string_match (Str.regexp ".*_$") name 0 in + let words = Str.split (Str.regexp "_") name in + if last_underscore name then + let cap_words = List.map words ~f:(fun word -> String.capitalize word) in + String.concat ~sep:"" cap_words ^ "_" + else + let cap_words = List.map words ~f:(fun word -> String.capitalize word) in + String.concat ~sep:"" cap_words + + let go_variable name = + let goname = go_name name in + (* NOTE: Deal with Go namespace conflict *) + let safe_name = + match goname with + | "Var" -> "vari" + | "Unsafe" -> "unsafety" + | _ -> goname + in + String.uncapitalize safe_name + + let c_go_args_list t = + List.map t.args ~f:(fun arg -> + let an = go_variable arg.arg_name in + let single_param = Printf.sprintf "%s %s" an in + match arg.arg_type with + | Bool -> single_param "int32" + | Int64 -> single_param "int64" + | Double -> single_param "float64" + | Tensor -> single_param "Ctensor" + | TensorOption -> single_param "Ctensor" + | Scalar -> single_param "Cscalar" + | ScalarType -> single_param "int32" + | Device -> single_param "int32" + | String -> single_param "string" + | IntList -> Printf.sprintf "%sData []int64, %sLen int" an an + | TensorOptList -> Printf.sprintf "%sData []Ctensor, %sLen int" an an + | TensorList -> Printf.sprintf "%sData []Ctensor, %sLen int" an an + | Int64Option -> Printf.sprintf "%sVal int64, %sNull int" an an + | DoubleOption -> Printf.sprintf "%sVal float64, %sNull int" an an + | TensorOptions -> Printf.sprintf "%sKind int32, %sDevice int32" an an + ) + |> String.concat ~sep:", " + + let c_go_args_list_notype t = + List.map t.args ~f:(fun arg -> + let an = go_variable arg.arg_name in + let an = match an with "var" -> "vari" | _ -> an in + let single_param = Printf.sprintf "%s %s" an in + match arg.arg_type with + | Bool -> Printf.sprintf "c%s" an + | Int64 -> Printf.sprintf "c%s" an + | Double -> Printf.sprintf "c%s" an + | Tensor -> Printf.sprintf "%s" an + | TensorOption -> Printf.sprintf "%s" an + | Scalar -> single_param "" + | ScalarType -> Printf.sprintf "c%s" an + | Device -> Printf.sprintf "c%s" an + | String -> Printf.sprintf "c%s, c%sLen" an an + | IntList -> Printf.sprintf "c%sDataPtr, c%sLen" an an + | TensorOptList -> Printf.sprintf "c%sDataPtr, c%sLen" an an + | TensorList -> Printf.sprintf "c%sDataPtr, c%sLen" an an + | Int64Option -> Printf.sprintf "c%sVal, c%sNull" an an + | DoubleOption -> Printf.sprintf "c%sVal, c%sNull" an an + | TensorOptions -> Printf.sprintf "c%sKind, c%sDevice" an an ) + |> String.concat ~sep:", " + + (* TODO: convert Go pointer to C pointer *) + let c_go_args_list_body t = + List.map t.args 
~f:(fun arg -> + let an = go_variable arg.arg_name in + (* let single_param = Printf.sprintf "%s %s" an in *) + match arg.arg_type with + | Bool -> + Printf.sprintf "\nc%s := *(*C.int)(unsafe.Pointer(&%s))" an an + | Int64 -> + Printf.sprintf "\nc%s := *(*C.int64_t)(unsafe.Pointer(&%s))" an an + | Double -> + Printf.sprintf "\nc%s := *(*C.double)(unsafe.Pointer(&%s))" an an + | Tensor -> "" + | TensorOption -> "" + | Scalar -> "" + | ScalarType -> + Printf.sprintf "\nc%s := *(*C.int)(unsafe.Pointer(&%s))" an an + | Device -> + Printf.sprintf "\nc%s := *(*C.int)(unsafe.Pointer(&%s))" an an + | String -> + Printf.sprintf + "\n\ + c%s := C.CString(%s)\n\ + %sLen := len(%s)\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an an an + | IntList -> + Printf.sprintf + "\n\ + c%sDataPtr := (*C.int64_t)(unsafe.Pointer(&%sData[0]))\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an + | TensorOptList -> + Printf.sprintf + "\n\ + c%sDataPtr := (*Ctensor)(unsafe.Pointer(&%sData[0]))\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an + | TensorList -> + Printf.sprintf + "\n\ + c%sDataPtr := (*Ctensor)(unsafe.Pointer(&%sData[0]))\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an + | Int64Option -> + Printf.sprintf + "\n\ + c%sVal := *(*C.int64_t)(unsafe.Pointer(&%sVal))\n\ + c%sNull := *(*C.uint8_t)(unsafe.Pointer(&%sNull))" + an an an an + | DoubleOption -> + Printf.sprintf + "\n\ + c%sVal := *(*C.double)(unsafe.Pointer(&%sVal))\n\ + c%sNull := *(*C.uint8_t)(unsafe.Pointer(&%sNull))" + an an an an + | TensorOptions -> + Printf.sprintf + "\n\ + c%sKind := *(*C.int)(unsafe.Pointer(&%sKind))\n\ + c%sDevice := *(*C.int)(unsafe.Pointer(&%sDevice))" + an an an an ) + |> String.concat ~sep:"" + + let self_name = "self" + + let self_tensor arg = + match arg.arg_type with + | Tensor -> String.( = ) arg.arg_name self_name + | _ -> false + + (* + * let type_parameters t = + * let needs_scalar_parameter = + * List.exists t.args ~f:(fun arg -> + * match arg.arg_type with Scalar -> true | _ -> false ) + * in + * let needs_type_parameter = + * List.exists t.args ~f:(fun arg -> + * match arg.arg_type with + * | TensorList | TensorOption -> true + * | _ -> false ) + * in + * if needs_type_parameter && needs_scalar_parameter then "Tensor, Scalar" + * else if needs_type_parameter then "Tensor" + * else if needs_scalar_parameter then "Scalar" + * else "" + * *) + + (* + * let go_args_list t = + * (* https://ocaml.janestreet.com/ocaml-core/latest/doc/base/Base/List/#val-partition_tf *) + * (* TODO. implement special cases - TensorOptions, ... *) + * match List.partition_tf t.args ~f:self_tensor with _, args_list -> + * args_list + * *) + + let is_inplace t = + match Str.string_match (Str.regexp ".*_$") t.name 0 with + | true -> true + | _ -> false + + let go_typed_args_list t = + let to_string args = + let args_list = + List.map args ~f:(fun arg -> + let go_arg_type = + match arg.arg_type with + | Bool -> "bool" + | Int64 -> "int64" + | Double -> "float64" + | Tensor -> "*Tensor" + | TensorOption -> "*Tensor" + | IntList -> "[]int64" + | TensorOptList -> "[]Tensor" + | TensorList -> "[]Tensor" + | String -> "string" + (* TODO. Struct{Kind gotch.DType Device gotch.Device} *) + (* E.g. 
`type KindDevice struct{}` *) + | TensorOptions -> "gotch.KindDevice" + | Scalar -> "*Scalar" + | ScalarType -> "gotch.DType" + | Int64Option -> "[]int64" + | DoubleOption -> "[]float64" + | Device -> "gotch.Device" + in + match arg.arg_type with + | TensorOptions -> + Printf.sprintf "%sKind gotch.DType, %sDevice gotch.Device" + (go_variable arg.arg_name) (go_variable arg.arg_name) + | _ -> + Printf.sprintf "%s %s" (go_variable arg.arg_name) go_arg_type + ) + in + if is_method t && not (is_inplace t) then + args_list @ ["del bool"] |> String.concat ~sep:", " + else args_list |> String.concat ~sep:", " + in + (* let self_arg = "self Tensor" in *) + match List.partition_tf t.args ~f:self_tensor with _, args_list -> + Printf.sprintf "%s" (to_string args_list) + + let go_notype_args_list t = + let to_string args = + let args_list = + List.map args ~f:(fun arg -> + match arg.arg_type with + | TensorOptions -> + Printf.sprintf "%sKind, %sDevice" (go_variable arg.arg_name) + (go_variable arg.arg_name) + | _ -> Printf.sprintf "%s" (go_variable arg.arg_name) ) + in + if is_method t && not (is_inplace t) then + args_list @ ["del"] |> String.concat ~sep:", " + else args_list |> String.concat ~sep:", " + in + match List.partition_tf t.args ~f:self_tensor with _, args_list -> + Printf.sprintf "%s" (to_string args_list) + + let go_return_type t ~fallible = + (* printf "t name: %s\n" t.name ; *) + let returns = + match t.returns with + | `fixed 1 -> "retVal *Tensor" + | `fixed v -> + List.init v ~f:(fun i -> Printf.sprintf "retVal%d *Tensor" i) + |> String.concat ~sep:", " |> Printf.sprintf "%s" + | `dynamic -> "retVal []Tensor" + | `bool -> "retVal bool" + | `int64_t -> "retVal int64" + | `double -> "retVal float64" + in + if is_inplace t then + if fallible then Printf.sprintf "err error" else Printf.sprintf "" + else if fallible then Printf.sprintf "%s, err error" returns + else Printf.sprintf "%s" returns + + let go_return_notype t ~fallible = + let returns = + match t.returns with + | `fixed 1 -> "retVal" + | `fixed v -> + List.init v ~f:(fun i -> Printf.sprintf "retVal%d" i) + |> String.concat ~sep:", " |> Printf.sprintf "%s" + | `dynamic -> "retVal" + | `bool -> "retVal" + | `int64_t -> "retVal" + | `double -> "retVal" + in + if is_inplace t then + if fallible then Printf.sprintf "err" else Printf.sprintf "" + else if fallible then Printf.sprintf "%s, err" returns + else Printf.sprintf "%s" returns + + let go_binding_args t = + List.map t.args ~f:(fun arg -> + let name = go_variable arg.arg_name in + match arg.arg_type with + | Tensor -> + if String.( = ) name "self" then "ts.ctensor" + else Printf.sprintf "%s.ctensor" name + | Scalar -> Printf.sprintf "%s.cscalar" name + | Bool -> Printf.sprintf "c%s" name + | ScalarType -> Printf.sprintf "%s.CInt()" name + | Device -> Printf.sprintf "%s.CInt()" name + | TensorOptions -> + Printf.sprintf "%sKind.CInt(), %sDevice.CInt()" name name + | String -> Printf.sprintf "%s" name + | IntList -> Printf.sprintf "%s, len(%s)" name name + | TensorList -> Printf.sprintf "c%s, len(c%s)" name name + | Int64Option -> Printf.sprintf "c%sVal, c%sNull" name name + | DoubleOption -> Printf.sprintf "c%sVal, c%sNull" name name + | TensorOption -> Printf.sprintf "%s.ctensor" name + | _ -> name ) + |> String.concat ~sep:", " + + let go_binding_body t = + List.map t.args ~f:(fun arg -> + let an = go_variable arg.arg_name in + match arg.arg_type with + | Bool -> + Printf.sprintf "c%s := int32(0)\n if %s { c%s = int32(1) }\n" an an + an + | Int64 -> "" + | Double -> "" + | 
Tensor -> "" + | TensorOption -> "" + | Scalar -> "" + | ScalarType -> "" + | Device -> "" + | String -> "" + | IntList -> "" + | Int64Option -> + Printf.sprintf + "var c%sVal int64 = 0\n\ + \ var c%sNull int = 1\n\ + \ if len(%s) > 0 {\n\ + \ c%sVal = %s[0]\n\ + \ c%sNull = 0\n\ + \ }\n" + an an an an an an + | DoubleOption -> + Printf.sprintf + "var c%sVal float64 = 0.0\n\ + \ var c%sNull int = 1\n\ + \ if len(%s) > 0 {\n\ + \ c%sVal = %s[0]\n\ + \ c%sNull = 0\n\ + \ }\n" + an an an an an an + | TensorOptList -> + Printf.sprintf + " var c%s []lib.Ctensor\n\ + \ for _, t := range %s {c%s = append(c%s, t.ctensor)}\n" + an an an an + | TensorList -> + Printf.sprintf + " var c%s []lib.Ctensor\n\ + \ for _, t := range %s {c%s = append(c%s, t.ctensor)}\n" + an an an an + | TensorOptions -> "" ) + |> String.concat ~sep:"" +end + +exception Not_a_simple_arg + +let read_yaml filename = + let funcs = + (* Split the file to avoid Yaml.of_string_exn segfaulting. *) + In_channel.with_file filename ~f:In_channel.input_lines + |> List.group ~break:(fun _ l -> + String.length l > 0 && Char.( = ) l.[0] '-' ) + |> List.concat_map ~f:(fun lines -> + Yaml.of_string_exn (String.concat lines ~sep:"\n") |> extract_list + ) + in + printf "Read %s, got %d functions.\n%!" filename (List.length funcs) ; + List.filter_map funcs ~f:(fun yaml -> + let map = extract_map yaml in + let name = Map.find_exn map "name" |> extract_string in + let operator_name = Map.find_exn map "operator_name" |> extract_string in + let overload_name = Map.find_exn map "overload_name" |> extract_string in + let deprecated = Map.find_exn map "deprecated" |> extract_bool in + let method_of = + Map.find_exn map "method_of" + |> extract_list |> List.map ~f:extract_string + in + let arguments = Map.find_exn map "arguments" |> extract_list in + let returns = + let is_tensor returns = + let returns = extract_map returns in + let return_type = + Map.find_exn returns "dynamic_type" |> extract_string + in + String.( = ) return_type "at::Tensor" + in + let returns = Map.find_exn map "returns" |> extract_list in + if List.for_all returns ~f:is_tensor then + Some (`fixed (List.length returns)) + else + match returns with + | [returns] -> ( + let return_type = + Map.find_exn (extract_map returns) "dynamic_type" + |> extract_string + in + match return_type with + | "bool" -> Some `bool + | "int64_t" -> Some `int64_t + | "double" -> Some `double + | "at::TensorList" + |"dynamic_type: const c10::List> &" -> + Some `dynamic + | _ -> None ) + | [] | _ :: _ :: _ -> None + in + let kind = + if List.exists method_of ~f:(String.( = ) "namespace") then + Some `function_ + else if List.exists method_of ~f:(String.( = ) "Tensor") then + Some `method_ + else None + in + if + (not deprecated) + && (not + (List.exists excluded_prefixes ~f:(fun prefix -> + String.is_prefix name ~prefix ))) + && (not + (List.exists excluded_suffixes ~f:(fun suffix -> + String.is_suffix name ~suffix ))) + && not (Set.mem excluded_functions name) + then + Option.both returns kind + |> Option.bind ~f:(fun (returns, kind) -> + try + let args = + List.filter_map arguments ~f:(fun arg -> + let arg = extract_map arg in + let arg_name = + Map.find_exn arg "name" |> extract_string + in + let arg_type = + Map.find_exn arg "dynamic_type" |> extract_string + in + let is_nullable = + Map.find arg "is_nullable" + |> Option.value_map ~default:false ~f:extract_bool + in + let default_value = + Map.find arg "default" |> Option.map ~f:extract_string + in + match Func.arg_type_of_string arg_type 
~is_nullable with + | Some Scalar + when Option.is_some default_value && not is_nullable + -> + None + | Some TensorOptions + when Option.is_some default_value + && Set.mem no_tensor_options name -> + None + | Some arg_type -> + let arg_name = + match (arg_name, arg_type) with + | "self", Scalar -> "self_scalar" + | _, _ -> arg_name + in + Some {Func.arg_name; arg_type; default_value} + | None -> + if Option.is_some default_value then None + else raise Not_a_simple_arg ) + in + Some + { Func.name + ; operator_name + ; overload_name + ; args + ; returns + ; kind } + with Not_a_simple_arg -> None ) + else None ) + +let p out_channel s = + Printf.ksprintf + (fun line -> + Out_channel.output_string out_channel line ; + Out_channel.output_char out_channel '\n' ) + s + +let print_inline out_channel s = + Printf.ksprintf (fun msg -> Out_channel.output_string out_channel msg) s + +let write_cpp funcs filename = + Out_channel.with_file (filename ^ ".cpp.h") ~f:(fun out_cpp -> + Out_channel.with_file (filename ^ ".h") ~f:(fun out_h -> + let pc s = p out_cpp s in + let ph s = p out_h s in + pc "// THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" ; + pc "" ; + ph "// THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" ; + ph "" ; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + let c_typed_args_list = Func.c_typed_args_list func in + match func.returns with + | `fixed ntensors -> + pc "void atg_%s(tensor *out__, %s) {" exported_name + c_typed_args_list ; + pc " PROTECT(" ; + pc " auto outputs__ = %s;" (Func.c_call func) ; + if ntensors = 1 then + pc " out__[0] = new torch::Tensor(outputs__);" + else + for i = 0 to ntensors - 1 do + pc + " out__[%d] = new \ + torch::Tensor(std::get<%d>(outputs__));" + i i + done ; + pc " )" ; + pc "}" ; + pc "" ; + ph "void atg_%s(tensor *, %s);" exported_name + c_typed_args_list + | `dynamic -> + pc "tensor *atg_%s(%s) {" exported_name c_typed_args_list ; + pc " PROTECT(" ; + pc " auto outputs__ = %s;" (Func.c_call func) ; + (* the returned type is a C++ vector of tensors *) + pc " int sz = outputs__.size();" ; + pc + " torch::Tensor **out__ = (torch::Tensor**)malloc((sz \ + + 1) * sizeof(torch::Tensor*));" ; + pc " for (int i = 0; i < sz; ++i)" ; + pc " out__[i] = new torch::Tensor(outputs__[i]);" ; + pc " out__[sz] = nullptr;" ; + pc " return out__;" ; + pc " )" ; + pc " return nullptr;" ; + pc "}" ; + pc "" ; + ph "tensor *atg_%s(%s);" exported_name c_typed_args_list + | (`bool | `int64_t | `double) as returns -> + let c_type = + match returns with + | `bool -> "int" + | `int64_t -> "int64_t" + | `double -> "double" + in + pc "%s atg_%s(%s) {" c_type exported_name c_typed_args_list ; + pc " PROTECT(" ; + pc " return %s;" (Func.c_call func) ; + pc " )" ; + pc " return 0;" ; + pc "}" ; + pc "" ; + ph "%s atg_%s(%s);" c_type exported_name c_typed_args_list ) + ) ) + +let write_wrapper funcs filename = + Out_channel.with_file filename ~f:(fun out_ml -> + let pm s = print_inline out_ml s in + pm "package ts" ; + pm "\n\n" ; + pm "// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" ; + pm "\n\n" ; + pm "// #include \"stdlib.h\"\n" ; + pm "import \"C\"" ; + pm "" ; + pm "\n\n" ; + pm "import(\n" ; + pm " \"unsafe\"\n" ; + pm "\n" ; + pm " \"github.com/sugarme/gotch\"\n" ; + pm " lib \"github.com/sugarme/gotch/libtch\"\n" ; + pm ")" ; + pm "\n\n" ; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + let is_method = Func.is_method func in + let is_inplace = Func.is_inplace func in + (* NOTE. 
`torch.__PATTERN` *) + let prefix_2underscore exported_name = + Str.string_match (Str.regexp "^__") exported_name 0 + in + (* NOTE. `torch._PATTERN` *) + let prefix_1underscore exported_name = + Str.string_match (Str.regexp "^_") exported_name 0 + in + (* NOTE. `torch.PATTERN_1` *) + let suffix_1 exported_name = + Str.string_match (Str.regexp ".*_1$") exported_name 0 + in + let gofunc_name = + if prefix_2underscore exported_name then + "__" ^ Func.go_name exported_name + else if prefix_1underscore exported_name then + "_" ^ Func.go_name exported_name + else if suffix_1 exported_name then + Func.go_name exported_name ^ "_" + else Func.go_name exported_name + in + let cfunc_name = "lib.Atg" ^ gofunc_name in + let go_args_list = Func.go_typed_args_list func in + (* NOTE. temporarily excluding these functions as not implemented at FFI *) + (* TODO. implement multiple tensors return function []Tensor *) + let excluded_funcs = + [ "Chunk" + ; "AlignTensors" + ; "BroadcastTensors" + ; "Meshgrid" + ; "MeshgridIndexing" + ; "_ToCpu" + ; "NonzeroNumpy" + ; "Split" + ; "SplitWithSizes" + ; "Unbind" + ; "Where" + ; "Atleast1d1" + ; "Atleast2d1" + ; "Atleast3d1" + ; "Dequantize1" + ; "QuantizePerTensor1" + ; "UnsafeChunk" + ; "UnsafeSplit" + ; "UnsafeSplitWithSizes" + ; "AlignTensors" + ; "UnflattenDenseTensors" + ; "TensorSplit" + ; "TensorSplitIndices" + ; "TensorSplitTensorIndicesOrSections" + ; "QuantizePerTensorTensors" + ; "Dsplit" + ; "DsplitArray" + ; "Hsplit" + ; "HsplitArray" + ; "Vsplit" + ; "VsplitArray" + ; "DequantizeTensors" + ; "Atleast1dSequence" + ; "Atleast2dSequence" + ; "Atleast3dSequence" + ; "Index" + ; "IndexPut" + ; "IndexPut_" + ; "_IndexPutImpl_" ] + in + if + List.exists excluded_funcs ~f:(fun name -> + String.( = ) name gofunc_name ) + then pm "" + else + match func.returns with + | `dynamic -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm " if del { defer ts.MustDrop() }\n" ; + pm " ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm " %s(ptr, %s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + (* NOTE. if in_place method, no retVal return *) + if not (Func.is_inplace func) then + pm " retVal = &Tensor{ctensor: *ptr}\n" + else pm " ts.ctensor = *ptr\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `fixed 1 -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm " if del { defer ts.MustDrop() }\n" ; + pm " ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm " %s(ptr, %s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + (* NOTE. 
if in_place method, no retVal return *) + if not (Func.is_inplace func) then + pm " retVal = &Tensor{ctensor: *ptr}\n" + else pm " ts.ctensor = *ptr\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `fixed ntensors -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm " if del { defer ts.MustDrop() }\n" ; + for i = 0 to ntensors - 1 do + (* pc " out__[%d] = new torch::Tensor(std::get<%d>(outputs__));" i i *) + if i = 0 then + pm + " ctensorPtr0 := \ + (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))\n" + else + pm + " ctensorPtr%d := \ + (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr%d)) \ + + unsafe.Sizeof(ctensorPtr0)))\n" + i (i - 1) + done ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm " %s(ctensorPtr0, %s)\n" cfunc_name + (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + (* NOTE. if in_place method, no retVal return *) + if not (Func.is_inplace func) then + for i = 0 to ntensors - 1 do + pm " retVal%d = &Tensor{ctensor: *ctensorPtr%d}\n" i i + done + else pm " ts.ctensor = *ptr\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `bool -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm " if del { defer ts.MustDrop() }\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm " retVal = %s(%s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `int64_t -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm " if del { defer ts.MustDrop() }\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm " retVal = %s(%s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `double -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm "if del { defer ts.MustDrop() }\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm " retVal = %s(%s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" ) ; + pm "// End of implementing Tensor ================================= \n" + ) + +let write_must_wrapper funcs filename = + Out_channel.with_file filename ~f:(fun out_ml -> + let pm s = print_inline out_ml s in + pm "package ts" ; + pm "\n\n" ; 
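+    (* The Must* wrappers emitted below call the fallible wrapper and panic
+       via log.Fatal on error. A sketch of the generated Go shape, for a
+       unary tensor method such as Abs:
+
+         func(ts *Tensor) MustAbs(del bool)(retVal *Tensor) {
+           retVal, err := ts.Abs(del)
+           if err != nil { log.Fatal(err) }
+           return retVal
+         }
+    *)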
+ pm "// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" ; + pm "\n\n" ; + pm "import(\n" ; + pm " \"log\"\n" ; + pm "\n" ; + pm " \"github.com/sugarme/gotch\"\n" ; + pm ")" ; + pm "\n\n" ; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + let is_method = Func.is_method func in + (* NOTE. `torch.__PATTERN` *) + let prefix_2underscore exported_name = + Str.string_match (Str.regexp "^__") exported_name 0 + in + (* NOTE. `torch._PATTERN` *) + let prefix_1underscore exported_name = + Str.string_match (Str.regexp "^_") exported_name 0 + in + (* NOTE. `torch.PATTERN_1` *) + let suffix_1 exported_name = + Str.string_match (Str.regexp ".*_1$") exported_name 0 + in + let gofunc_name = + if prefix_2underscore exported_name then + "__" ^ Func.go_name exported_name + else if prefix_1underscore exported_name then + "_" ^ Func.go_name exported_name + else if suffix_1 exported_name then + Func.go_name exported_name ^ "_" + else Func.go_name exported_name + in + let go_args_list = Func.go_typed_args_list func in + let go_args_list_notype = Func.go_notype_args_list func in + (* NOTE. temporarily excluding these functions as not implemented at FFI *) + let excluded_funcs = + [ "Chunk" + ; "AlignTensors" + ; "BroadcastTensors" + ; "Meshgrid" + ; "MeshgridIndexing" + ; "_ToCpu" + ; "NonzeroNumpy" + ; "Split" + ; "SplitWithSizes" + ; "Unbind" + ; "Where" + ; "Atleast1d1" + ; "Atleast2d1" + ; "Atleast3d1" + ; "Dequantize1" + ; "QuantizePerTensor1" + ; "UnsafeChunk" + ; "UnsafeSplit" + ; "UnsafeSplitWithSizes" + ; "AlignTensors" + ; "UnflattenDenseTensors" + ; "TensorSplit" + ; "TensorSplitIndices" + ; "TensorSplitTensorIndicesOrSections" + ; "QuantizePerTensorTensors" + ; "Dsplit" + ; "DsplitArray" + ; "Hsplit" + ; "HsplitArray" + ; "Vsplit" + ; "VsplitArray" + ; "DequantizeTensors" + ; "Atleast1dSequence" + ; "Atleast2dSequence" + ; "Atleast3dSequence" + ; "Index" + ; "IndexPut" + ; "IndexPut_" + ; "_IndexPutImpl_" ] + in + if + List.exists excluded_funcs ~f:(fun name -> + String.( = ) name gofunc_name ) + then pm "" + else + match func.returns with + | `dynamic -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `fixed 1 -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + (* NOTE. 
No return retVal for in_place method *) + if Func.is_inplace func then + if is_method then + pm " err := ts.%s(%s)\n" gofunc_name go_args_list_notype + else pm " err := %s(%s)\n" gofunc_name go_args_list_notype + else if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `fixed _ -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + (* NOTE. No return retVal for in_place method *) + if Func.is_inplace func then + if is_method then + pm " err := ts.%s(%s)\n" gofunc_name go_args_list_notype + else pm " err := %s(%s)\n" gofunc_name go_args_list_notype + else if is_method then + pm " %s, err := ts.%s(%s)\n" + (Func.go_return_notype func ~fallible:false) + gofunc_name go_args_list_notype + else + pm " %s, err := %s(%s)\n" + (Func.go_return_notype func ~fallible:false) + gofunc_name go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `bool -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `int64_t -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `double -> + pm "\n" ; + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" ) ; + pm "// End of implementing Tensor ================================= \n" + ) + +let write_ffi funcs filename = + Out_channel.with_file filename ~f:(fun out_ml -> + let pm s = p out_ml s in + pm "package libtch" ; + pm "" ; + pm "// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" 
; + pm "" ; + pm "//#include \"stdbool.h\" " ; + pm "//#include \"torch_api.h\" " ; + pm "import \"C\"" ; + pm "" ; + pm "import \"unsafe\"" ; + pm "" ; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + (* let is_method = *) + (* match func.Func.kind with `method_ -> true | `function_ -> false *) + (* in *) + (* let is_inplace = *) + (* Func.is_inplace func *) + (* + * match exported_name with + * | "add_1" -> true + * | "sub_1" -> true + * | "div_1" -> true + * | "mul_1" -> true + * | _ -> false + * *) + (* in *) + (* NOTE. `torch.__PATTERN` *) + let prefix_2underscore exported_name = + Str.string_match (Str.regexp "^__") exported_name 0 + in + (* NOTE. `torch._PATTERN` *) + let prefix_1underscore exported_name = + Str.string_match (Str.regexp "^_") exported_name 0 + in + (* NOTE. `torch.PATTERN_1` *) + let suffix_1 exported_name = + Str.string_match (Str.regexp ".*_1$") exported_name 0 + in + let ffifunc_name = + if prefix_2underscore exported_name then + "__" ^ Func.go_name exported_name + else if prefix_1underscore exported_name then + "_" ^ Func.go_name exported_name + else if suffix_1 exported_name then + Func.go_name exported_name ^ "_" + else Func.go_name exported_name + in + match func.Func.returns with + | `fixed _ -> + pm "func Atg%s(ptr *Ctensor, %s){%s \n\tC.atg_%s(ptr, %s)\n}" + ffifunc_name (Func.c_go_args_list func) + (Func.c_go_args_list_body func) + exported_name + (Func.c_go_args_list_notype func) + | `dynamic -> pm "" + | `bool -> + pm "func Atg%s(%s) bool{%s" ffifunc_name + (Func.c_go_args_list func) + (Func.c_go_args_list_body func) ; + pm "\t cResult := C.atg_%s(%s)" exported_name + (Func.c_go_args_list_notype func) ; + pm "\t cbool := *(*int)(unsafe.Pointer(&cResult))" ; + pm "\t if cbool == 1{return true}" ; + pm "\t return false" ; + pm "}" + | `int64_t -> + pm "func Atg%s(%s) int64{%s" ffifunc_name + (Func.c_go_args_list func) + (Func.c_go_args_list_body func) ; + pm "\t cResult := C.atg_%s(%s)" exported_name + (Func.c_go_args_list_notype func) ; + pm "\t return *(*int64)(unsafe.Pointer(&cResult))" ; + pm "}" + | `double -> + pm "func Atg%s(%s) float64{%s" ffifunc_name + (Func.c_go_args_list func) + (Func.c_go_args_list_body func) ; + pm "\t cResult := C.atg_%s(%s)" exported_name + (Func.c_go_args_list_notype func) ; + pm "\t return *(*float64)(unsafe.Pointer(&cResult))" ; + pm "}" + (* TODO: need more implement here *) + (* pm "func Atg%s(%s)(retValPtr *Ctensor)" *) + (* (Func.go_name exported_name) *) + (* (Func.c_go_args_list func) *) ) ) + +let methods = + let c name args = + { Func.name + ; operator_name= name + ; overload_name= "" + ; args + ; returns= `fixed 1 + ; kind= `method_ } + in + let ca arg_name arg_type = {Func.arg_name; arg_type; default_value= None} in + [ c "grad" [ca "self" Tensor] + ; c "set_requires_grad" [ca "self" Tensor; ca "r" Bool] + ; c "toType" [ca "self" Tensor; ca "scalar_type" ScalarType] + ; c "to" [ca "self" Tensor; ca "device" Device] ] + +let run ~yaml_filename ~cpp_filename ~ffi_filename ~must_wrapper_filename + ~wrapper_filename = + let funcs = read_yaml yaml_filename in + let funcs = methods @ funcs in + printf "Generating code for %d functions.\n%!" (List.length funcs) ; + (* Generate some unique names for overloaded functions. 
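+     The scheme implemented below: the overload whose overload_name is empty
+     keeps the bare lowercased operator name (when none is empty, the first,
+     shortest one does); every other overload becomes
+     operator_name ^ "_" ^ overload_name, with the trailing underscore of
+     in-place operators kept last, e.g. add_ with overload Tensor gives
+     add_tensor_.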
*) + let funcs = + List.map funcs ~f:(fun func -> (String.lowercase func.operator_name, func)) + |> Map.of_alist_multi (module String) + |> Map.to_alist + |> List.concat_map ~f:(fun (name, funcs) -> + match funcs with + | [] -> assert false + | [func] -> [(name, func)] + | funcs -> + let has_empty_overload = + List.exists funcs ~f:(fun (func : Func.t) -> + String.is_empty func.overload_name ) + in + List.sort funcs ~compare:(fun (f1 : Func.t) (f2 : Func.t) -> + match + Int.compare (String.length f1.name) + (String.length f2.name) + with + | 0 -> + Int.compare (List.length f1.args) (List.length f2.args) + | cmp -> cmp ) + |> List.mapi ~f:(fun index (func : Func.t) -> + let operator_name = + String.lowercase func.operator_name + in + let overload_name = + String.lowercase func.overload_name + in + let name = + if + String.is_empty overload_name + || (index = 0 && not has_empty_overload) + then operator_name + else if String.is_suffix operator_name ~suffix:"_" then + operator_name ^ overload_name ^ "_" + else operator_name ^ "_" ^ overload_name + in + (name, func) ) ) + |> Map.of_alist_exn (module String) + in + write_cpp funcs cpp_filename ; + write_ffi funcs ffi_filename ; + write_must_wrapper funcs must_wrapper_filename ; + write_wrapper funcs wrapper_filename + +let () = + run ~yaml_filename:"gen/pytorch/Declarations-v1.10.0.yaml" + ~cpp_filename:"libtch/torch_api_generated" + ~ffi_filename:"libtch/c-generated.go" + ~must_wrapper_filename:"ts/must-tensor-generated.go" + ~wrapper_filename:"ts/tensor-generated.go" diff --git a/gen/pytorch/Declarations-v1.11.0.yaml b/gen/pytorch/Declarations-v1.11.0.yaml new file mode 100644 index 0000000..8f96e7f --- /dev/null +++ b/gen/pytorch/Declarations-v1.11.0.yaml @@ -0,0 +1,139252 @@ +- name: _cast_Byte + operator_name: _cast_Byte + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Char + operator_name: _cast_Char + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: 
non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Double + operator_name: _cast_Double + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Float + operator_name: _cast_Float + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Int + operator_name: _cast_Int + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Long + operator_name: _cast_Long + overload_name: '' + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Short + operator_name: _cast_Short + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cast_Half + operator_name: _cast_Half + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _backward + operator_name: _backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? 
retain_graph=None, bool create_graph=False) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: inputs + type: at::TensorList + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: gradient + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: retain_graph + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: create_graph + type: bool + schema_order_cpp_signature: void (const at::Tensor &, at::TensorList, const c10::optional &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: inputs + type: at::TensorList + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: gradient + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: retain_graph + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: create_graph + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: set_data + operator_name: set_data + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::set_data(Tensor(a!) self, Tensor new_data) -> () + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: new_data + type: const at::Tensor & + schema_order_cpp_signature: void (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
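+  # Schema-annotation note: 'a!' marks this argument (self) as belonging to
+  # alias set 'a' and written to, i.e. aten::set_data mutates self in place.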
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: new_data + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: data + operator_name: data + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::data(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_leaf + operator_name: is_leaf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_leaf(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: output_nr + operator_name: output_nr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::output_nr(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _version + operator_name: _version + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_version(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: requires_grad_ + operator_name: 
requires_grad_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: requires_grad + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, bool) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: requires_grad + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: retain_grad + operator_name: retain_grad + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::retain_grad(Tensor(a!) self) -> () + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: void (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: retains_grad + operator_name: retains_grad + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::retains_grad(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _fw_primal + operator_name: _fw_primal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_dual + operator_name: _make_dual + 
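+# _fw_primal, _make_dual and _unpack_dual are forward-mode autodiff plumbing:
+# a dual tensor pairs a primal value with a tangent at a nesting `level`.
+# `Tensor(a)` (without `!`) marks the result as a view or alias of the input,
+# not an in-place mutation.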
overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: primal + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tangent + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: primal + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tangent + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unpack_dual + operator_name: _unpack_dual + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: dual + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: dual + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: primal + name: primal + type: at::Tensor + - dynamic_type: at::Tensor + field_name: tangent + name: tangent + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _new_zeros_with_same_feature_meta + operator_name: _new_zeros_with_same_feature_meta + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: self_num_batch_dims + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: self_num_batch_dims + type: int64_t + method_of: + - Type + - namespace 
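+# `method_of` determines how a binding generator exposes an operator: records
+# listing `Tensor` become tensor methods, while `namespace`-only records are
+# free functions (and, correspondingly, package-level wrappers in generated
+# bindings).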
+ mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _has_same_storage_numel + operator_name: _has_same_storage_numel + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rename_ + operator_name: rename_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + name: names + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + name: names + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rename + operator_name: rename + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rename(Tensor(a) self, Dimname[]? 
names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + name: names + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + name: names + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: align_to + operator_name: align_to + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: names + type: at::DimnameList + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: names + type: at::DimnameList + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: align_to + operator_name: align_to + overload_name: ellipsis_idx + manual_kernel_registration: false + category_override: '' + schema_string: aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: order + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ellipsis_idx + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: order + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ellipsis_idx + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: align_as + operator_name: align_as + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::align_as(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: 
at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: align_tensors + operator_name: align_tensors + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::align_tensors(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _assert_async + operator_name: _assert_async + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_assert_async(Tensor self) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: void (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: refine_names + operator_name: refine_names + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: names + type: at::DimnameList + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: names + type: at::DimnameList + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _use_cudnn_ctc_loss + operator_name: _use_cudnn_ctc_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] 
target_lengths, int blank) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_ctc_loss + operator_name: _cudnn_ctc_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + 
dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _use_cudnn_rnn_flatten_weight + operator_name: _use_cudnn_rnn_flatten_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_use_cudnn_rnn_flatten_weight() -> bool + arguments: [] + schema_order_cpp_signature: bool () + schema_order_arguments: [] + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cudnn_rnn_flatten_weight + operator_name: _cudnn_rnn_flatten_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight_arr + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: input_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: proj_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t, int64_t, int64_t, int64_t, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight_arr + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: input_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: proj_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + 
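+# `abstract: true` (paired with `has_math_kernel: false`) means the operator
+# has no composite fallback and requires a backend kernel; the cuDNN RNN
+# helpers in this stretch exist only on CUDA builds. Schema types also widen
+# in C++: `float` becomes `double` and `int` becomes `int64_t`.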
is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_rnn + operator_name: _cudnn_rnn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight_buf + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: proj_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const c10::optional &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight_buf + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: proj_size + type: int64_t + - annotation: null + dynamic_type: 
int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_rnn_backward + operator_name: _cudnn_rnn_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_buf + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_output + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: proj_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: 
null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_buf + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_output + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: proj_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - 
dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::TensorList + name: result3 + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_init_dropout_state + operator_name: _cudnn_init_dropout_state + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dropout_seed + type: int64_t + - annotation: null + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (double, bool, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dropout_seed + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _debug_has_internal_overlap + operator_name: _debug_has_internal_overlap + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_debug_has_internal_overlap(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _fused_dropout + operator_name: _fused_dropout + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_dropout(Tensor self, float p, Generator? 
generator=None) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _masked_scale + operator_name: _masked_scale + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_dropout + operator_name: native_dropout + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_dropout(Tensor input, float p, bool? 
train) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: true + name: train + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: true + name: train + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_dropout_backward + operator_name: native_dropout_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sobol_engine_draw + operator_name: _sobol_engine_draw + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? 
dtype) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: quasi + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sobolstate + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: quasi + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sobolstate + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _sobol_engine_ff_ + operator_name: _sobol_engine_ff_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sobolstate + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a! 
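+# Trailing-underscore operators (_sobol_engine_ff_, _sobol_engine_scramble_,
+# dropout_, abs_, ...) follow the ATen in-place convention: `self` carries
+# the `a!` annotation, `inplace: true` is set, and the op returns `self` as
+# `Tensor(a!)` instead of allocating a new result.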
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sobolstate + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _sobol_engine_scramble_ + operator_name: _sobol_engine_scramble_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: ltm + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: ltm + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _sobol_engine_initialize_state_ + operator_name: _sobol_engine_initialize_state_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _reshape_from_tensor + operator_name: _reshape_from_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: shape + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: shape + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _shape_as_tensor + operator_name: _shape_as_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_shape_as_tensor(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: dropout + operator_name: dropout + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: 
dropout_ + operator_name: dropout_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, bool) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: feature_dropout + operator_name: feature_dropout + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::feature_dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: feature_dropout_ + operator_name: feature_dropout_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, bool) + schema_order_arguments: + - annotation: a! 
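+# The dropout family below shares one signature, (Tensor input, float p,
+# bool train): `p` is the probability of zeroing an element and `train`
+# gates whether dropout is applied at all; the `_` variants mutate `self`.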
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: alpha_dropout + operator_name: alpha_dropout + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: alpha_dropout_ + operator_name: alpha_dropout_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: feature_alpha_dropout + operator_name: feature_alpha_dropout + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: feature_alpha_dropout_ + operator_name: feature_alpha_dropout_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: abs + operator_name: abs + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::abs(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: abs_ + operator_name: abs_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::abs_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: abs_out + operator_name: abs + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: absolute + operator_name: absolute + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::absolute(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: absolute_ + operator_name: absolute_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::absolute_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: absolute_out + operator_name: absolute + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: angle + operator_name: angle + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::angle(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: angle_out + operator_name: angle + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view_as_real + operator_name: view_as_real + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::view_as_real(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view_as_complex + operator_name: view_as_complex + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::view_as_complex(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sgn + operator_name: sgn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sgn(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sgn_ + operator_name: sgn_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sgn_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sgn_out + operator_name: sgn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: real + operator_name: real + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::real(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: imag + operator_name: imag + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::imag(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _conj + operator_name: _conj + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_conj(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native 
+ python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conj + operator_name: conj + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conj(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _conj_physical + operator_name: _conj_physical + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_conj_physical(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conj_physical + operator_name: conj_physical + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conj_physical(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conj_physical_out + operator_name: conj_physical + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conj_physical_ + operator_name: conj_physical_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: resolve_conj + operator_name: resolve_conj + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::resolve_conj(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: resolve_neg + operator_name: resolve_neg + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::resolve_neg(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _neg_view + operator_name: _neg_view + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_neg_view(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + 
with_gil: false + deprecated: false + has_math_kernel: false +- name: acos + operator_name: acos + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::acos(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acos_ + operator_name: acos_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::acos_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acos_out + operator_name: acos + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccos + operator_name: arccos + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arccos(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arccos_ + operator_name: arccos_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arccos_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arccos_out + operator_name: arccos + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: avg_pool1d + operator_name: avg_pool1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: adaptive_avg_pool1d + operator_name: adaptive_avg_pool1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + 
name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: adaptive_max_pool1d + operator_name: adaptive_max_pool1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: add + operator_name: add + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add_ + operator_name: add_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add_out + operator_name: add + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu + operator_name: _add_relu + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu_ + operator_name: _add_relu_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu_out + operator_name: _add_relu + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu + operator_name: _add_relu + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu_ + operator_name: _add_relu_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add + operator_name: add + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add_ + operator_name: add_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmv + operator_name: addmv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmv_ + operator_name: addmv_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmv_out + operator_name: addmv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addr + operator_name: addr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: addr_ + operator_name: addr_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addr_out + operator_name: addr + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: affine_grid_generator + operator_name: affine_grid_generator + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: theta + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: theta + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: affine_grid_generator_backward + operator_name: affine_grid_generator_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: all + operator_name: all + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all_out + operator_name: all + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all + operator_name: all + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: all_out + operator_name: all + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: allclose + operator_name: allclose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: any + operator_name: any + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any_out + operator_name: any + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: 
aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: any_out + operator_name: any + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: arange
+  operator_name: arange
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: arange
+  operator_name: arange
+  overload_name: start
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: arange
+  operator_name: arange
+  overload_name: start_step
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: step
+    type: const at::Scalar &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: step
+    type: const at::Scalar &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: arange_out
+  operator_name: arange
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: arange_out
+  operator_name: arange
+  overload_name: start_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: step + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: step + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dim_arange + operator_name: _dim_arange + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_dim_arange(Tensor like, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: like + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: like + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: argmax + operator_name: argmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::argmax(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<int64_t>, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: argmax_out
+  operator_name: argmax
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<int64_t>, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: argmin
+  operator_name: argmin
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<int64_t>, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: argmin_out
+  operator_name: argmin
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<int64_t>, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acosh + operator_name: acosh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::acosh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acosh_ + operator_name: acosh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::acosh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acosh_out + operator_name: acosh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccosh + operator_name: arccosh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arccosh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arccosh_ + operator_name: arccosh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arccosh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arccosh_out + operator_name: arccosh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: asinh + operator_name: asinh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::asinh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asinh_ + operator_name: asinh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::asinh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asinh_out + operator_name: asinh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsinh + operator_name: arcsinh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arcsinh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arcsinh_ + operator_name: arcsinh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arcsinh_out + operator_name: arcsinh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atanh + operator_name: atanh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atanh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atanh_ + operator_name: atanh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atanh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atanh_out + operator_name: atanh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctanh + operator_name: arctanh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctanh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arctanh_ + operator_name: arctanh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctanh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arctanh_out + operator_name: arctanh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: as_strided + operator_name: as_strided + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::as_strided(Tensor(a) self, int[] size, int[] stride, int? 
storage_offset=None) -> Tensor(a)
+  arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: storage_offset
+    type: c10::optional<int64_t>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional<int64_t>)
+  schema_order_arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: storage_offset
+    type: c10::optional<int64_t>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: as_strided_
+  operator_name: as_strided_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: storage_offset
+    type: c10::optional<int64_t>
+  schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional<int64_t>)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: storage_offset
+    type: c10::optional<int64_t>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: const at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: asin
+  operator_name: asin
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::asin(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: asin_
+  operator_name: asin_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::asin_(Tensor(a!) self) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: asin_out
+  operator_name: asin
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsin + operator_name: arcsin + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arcsin(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arcsin_ + operator_name: arcsin_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arcsin_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arcsin_out + operator_name: arcsin + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atan + operator_name: atan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atan(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan_ + operator_name: atan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atan_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan_out + operator_name: atan + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctan + operator_name: arctan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctan(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arctan_ + operator_name: arctan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctan_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arctan_out + operator_name: arctan + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atleast_1d + operator_name: atleast_1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atleast_1d(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atleast_1d + operator_name: atleast_1d + overload_name: Sequence + manual_kernel_registration: false + category_override: '' + schema_string: aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atleast_2d + operator_name: atleast_2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atleast_2d(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atleast_2d + operator_name: atleast_2d + overload_name: Sequence + manual_kernel_registration: false + category_override: '' + schema_string: aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + 
device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atleast_3d + operator_name: atleast_3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atleast_3d(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: atleast_3d + operator_name: atleast_3d + overload_name: Sequence + manual_kernel_registration: false + category_override: '' + schema_string: aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: baddbmm + operator_name: baddbmm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: 
false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: baddbmm_ + operator_name: baddbmm_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: baddbmm_out + operator_name: baddbmm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bartlett_window + operator_name: bartlett_window + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bartlett_window + operator_name: bartlett_window + overload_name: periodic + manual_kernel_registration: false + category_override: '' + schema_string: aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: batch_norm + operator_name: batch_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? 
running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: quantized_batch_norm + operator_name: quantized_batch_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: output_scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: output_zero_point + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: var + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: output_scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: output_zero_point + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_impl_index + operator_name: _batch_norm_impl_index + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: int64_t + name: result4 + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _batch_norm_impl_index_backward + operator_name: _batch_norm_impl_index_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: impl_index + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var_transform + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reservedSpace + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: impl_index + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var_transform + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reservedSpace + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bernoulli + operator_name: bernoulli + overload_name: '' 
+ manual_kernel_registration: false + category_override: '' + schema_string: aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli_out + operator_name: bernoulli + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli_ + operator_name: bernoulli_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: p + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: p + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli_ + operator_name: bernoulli_ + overload_name: float + manual_kernel_registration: false + category_override: '' + schema_string: aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli + operator_name: bernoulli + overload_name: p + manual_kernel_registration: false + category_override: '' + schema_string: aten::bernoulli.p(Tensor self, float p, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bilinear + operator_name: bilinear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: binary_cross_entropy + operator_name: binary_cross_entropy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_out + operator_name: binary_cross_entropy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_backward + operator_name: binary_cross_entropy_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_backward_out + operator_name: binary_cross_entropy_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_with_logits + operator_name: binary_cross_entropy_with_logits + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? 
pos_weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: pos_weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: pos_weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_with_logits_backward + operator_name: binary_cross_entropy_with_logits_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? 
pos_weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: pos_weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: pos_weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bincount + operator_name: bincount + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::bincount(Tensor self, Tensor? 
weights=None, int minlength=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weights + type: const c10::optional & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: minlength + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weights + type: const c10::optional & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: minlength + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_not + operator_name: bitwise_not + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_not(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_not_ + operator_name: bitwise_not_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_not_out + operator_name: bitwise_not + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copysign_out + operator_name: copysign + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copysign + operator_name: copysign + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copysign_ + operator_name: copysign_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copysign + operator_name: copysign + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copysign_ + operator_name: copysign_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copysign_out + operator_name: copysign + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_not + operator_name: logical_not + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_not(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_not_ + operator_name: logical_not_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_not_out + operator_name: logical_not + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor + operator_name: logical_xor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_ + operator_name: logical_xor_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_out + operator_name: logical_xor + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_and + operator_name: logical_and + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_and(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_and_ + operator_name: logical_and_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_and_out + operator_name: logical_and + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_or + operator_name: logical_or + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_or(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_or_ + operator_name: logical_or_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_or_out + operator_name: logical_or + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: blackman_window
+  operator_name: blackman_window
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: window_length
+    type: int64_t
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (int64_t, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: window_length
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: blackman_window
+  operator_name: blackman_window
+  overload_name: periodic
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: window_length
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: periodic
+    type: bool
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: window_length
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: periodic
+    type: bool
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: bmm
+  operator_name: bmm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::bmm(Tensor self, Tensor mat2) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: bmm_out
+  operator_name: bmm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: broadcast_tensors
+  operator_name: broadcast_tensors
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
+  arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: broadcast_to
+  operator_name: broadcast_to
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
+  arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_broadcast_to
+  operator_name: _sparse_broadcast_to
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
+  arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: cat
+  operator_name: cat
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::cat(Tensor[] tensors, int dim=0) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: cat_out
+  operator_name: cat
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cat + operator_name: cat + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (at::TensorList, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cat_out + operator_name: cat + overload_name: names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Dimname, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: concat + operator_name: concat + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::concat(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: concat_out + operator_name: concat + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: concat + operator_name: concat + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (at::TensorList, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: concat_out + operator_name: concat + overload_name: names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Dimname, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: block_diag + operator_name: block_diag + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::block_diag(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ceil + operator_name: ceil + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::ceil(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ceil_ + operator_name: ceil_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::ceil_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ceil_out + operator_name: ceil + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: chain_matmul + operator_name: chain_matmul + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::chain_matmul(Tensor[] matrices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: matrices + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: matrices + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: chain_matmul_out + operator_name: chain_matmul + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: matrices + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: matrices + type: at::TensorList + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: unsafe_chunk
+  operator_name: unsafe_chunk
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: chunks
+    type: int64_t
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: chunks
+    type: int64_t
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: chunk
+  operator_name: chunk
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
+  arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: chunks
+    type: int64_t
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: chunks
+    type: int64_t
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: tensor_split
+  operator_name: tensor_split
+  overload_name: sections
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[]
+  arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sections
+    type: int64_t
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sections
+    type: int64_t
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: tensor_split
+  operator_name: tensor_split
+  overload_name: indices
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[]
+  arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: indices
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::IntArrayRef, int64_t)
+  schema_order_arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: indices
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: tensor_split
+  operator_name: tensor_split
+  overload_name: tensor_indices_or_sections
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
+  arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: tensor_indices_or_sections
+    type: const at::Tensor &
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, const at::Tensor &, int64_t)
+  schema_order_arguments:
+  - annotation: a -> *
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: tensor_indices_or_sections
+    type: const at::Tensor &
+  - annotation: null
+    default: 0
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: clamp
+  operator_name: clamp
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Scalar> &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Scalar> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Scalar> &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp
+  operator_name: clamp
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp_
+  operator_name: clamp_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Scalar> &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Scalar> &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Scalar> &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp_
+  operator_name: clamp_
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp_out
+  operator_name: clamp
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Scalar> &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Scalar> &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Scalar> &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp_out
+  operator_name: clamp
+  overload_name: Tensor_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: min
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: max
+    type: const c10::optional<at::Tensor> &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp_max
+  operator_name: clamp_max
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp_max(Tensor self, Scalar max) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: clamp_max
+  operator_name: clamp_max
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: max
type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: max + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max_ + operator_name: clamp_max_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max_ + operator_name: clamp_max_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: max + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: max + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max_out + operator_name: clamp_max + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max_out + operator_name: clamp_max + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: max + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: max + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min + operator_name: clamp_min + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_min(Tensor self, Scalar min) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min + operator_name: clamp_min + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: min + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: min + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min_ + operator_name: clamp_min_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min_ + operator_name: clamp_min_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: min + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: min + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min_out + operator_name: clamp_min + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min_out + operator_name: clamp_min + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: min + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: min + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clip + operator_name: clip + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: max + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: max + type: const c10::optional & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: clip + operator_name: clip + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? 
max=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max + type: const c10::optional & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: clip_ + operator_name: clip_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: max + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: max + type: const c10::optional & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: clip_ + operator_name: clip_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max + type: const c10::optional & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: clip_out + operator_name: clip + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: max + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: max + type: const c10::optional & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: clip_out + operator_name: clip + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: min + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: max + type: const c10::optional & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cudnn_is_acceptable + operator_name: cudnn_is_acceptable + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_is_acceptable(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: complex + operator_name: complex + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::complex(Tensor real, Tensor imag) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: real + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: imag + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: real + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: imag + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: complex_out + operator_name: complex + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: real + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: imag + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: real + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: imag + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polar + operator_name: polar + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::polar(Tensor abs, Tensor angle) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: abs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: angle + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: abs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: angle + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polar_out + operator_name: polar + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: abs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: angle + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: abs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: angle + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: constant_pad_nd + operator_name: constant_pad_nd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: pad + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: pad + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: contiguous + operator_name: contiguous + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: at::MemoryFormat + is_nullable: false + kwarg_only: true + name: memory_format + type: at::MemoryFormat + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::MemoryFormat) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: at::MemoryFormat + is_nullable: false + kwarg_only: true + name: memory_format + type: at::MemoryFormat + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: convolution + operator_name: convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: convolution_backward + operator_name: convolution_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? 
bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: bias_sizes + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: bias_sizes + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: convolution_overrideable + operator_name: convolution_overrideable + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: convolution_backward_overrideable + operator_name: convolution_backward_overrideable + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + 
dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: grad_input + name: grad_input + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_weight + name: grad_weight + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_bias + name: grad_bias + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convolution + operator_name: _convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convolution + operator_name: _convolution + overload_name: deprecated + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _convolution_mode + operator_name: _convolution_mode + overload_name: '' + manual_kernel_registration: false + category_override: '' + 
schema_string: aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _convolution_double_backward + operator_name: _convolution_double_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? 
ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: ggI + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: ggW + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: ggb + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: gO + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::IntArrayRef, int64_t, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: ggI + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: ggW + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: ggb + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: gO + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: 
at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv1d + operator_name: conv1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv2d + operator_name: conv2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv2d(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv3d + operator_name: conv3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv1d + operator_name: conv1d + overload_name: padding + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv1d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: '"valid"' + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: '"valid"' + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv2d + operator_name: conv2d + overload_name: padding + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: '"valid"' + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: '"valid"' + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv3d + operator_name: conv3d + overload_name: padding + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: '"valid"' + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::string_view, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: '"valid"' + dynamic_type: c10::string_view + is_nullable: false + name: padding + type: c10::string_view + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv_tbc + operator_name: conv_tbc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: pad + 
type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv_tbc_backward + operator_name: conv_tbc_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv_transpose1d + operator_name: conv_transpose1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv_transpose2d + operator_name: conv_transpose2d + overload_name: input + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: conv_transpose3d + operator_name: conv_transpose3d + overload_name: input + manual_kernel_registration: false + category_override: '' + schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: copy_ + operator_name: copy_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _copy_from + operator_name: _copy_from + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dst + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dst + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _copy_from_and_resize + operator_name: _copy_from_and_resize + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dst + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dst + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cos + operator_name: cos + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cos(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - 
dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cos_ + operator_name: cos_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cos_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cos_out + operator_name: cos + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cosh + operator_name: cosh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cosh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cosh_ + operator_name: cosh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cosh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cosh_out + operator_name: cosh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cosine_embedding_loss + operator_name: cosine_embedding_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: count_nonzero + operator_name: count_nonzero + overload_name: dim_IntList + manual_kernel_registration: false + category_override: '' + schema_string: aten::count_nonzero.dim_IntList(Tensor 
self, int[] dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: count_nonzero + operator_name: count_nonzero + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::count_nonzero(Tensor self, int? dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cov + operator_name: cov + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? 
aweights=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: correction + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: fweights + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: aweights + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: correction + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: fweights + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: aweights + type: const c10::optional & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: corrcoef + operator_name: corrcoef + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::corrcoef(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cudnn_affine_grid_generator + operator_name: cudnn_affine_grid_generator + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: theta + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: H + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: W + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: theta + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - 
annotation: null + dynamic_type: int64_t + is_nullable: false + name: H + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: W + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: grid + name: grid + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_affine_grid_generator_backward + operator_name: cudnn_affine_grid_generator_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: H + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: W + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: H + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: W + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: grad_theta + name: grad_theta + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_batch_norm + operator_name: cudnn_batch_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: exponential_average_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: exponential_average_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_batch_norm_backward + operator_name: cudnn_batch_norm_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserveSpace + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserveSpace + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_convolution + operator_name: cudnn_convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: 
stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_convolution_transpose + operator_name: cudnn_convolution_transpose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, 
at::IntArrayRef, int64_t, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_convolution_relu + operator_name: cudnn_convolution_relu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + 
returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_convolution_add_relu + operator_name: cudnn_convolution_add_relu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: z + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: true + name: alpha + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: z + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: true + name: alpha + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_grid_sampler + operator_name: cudnn_grid_sampler + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_grid_sampler_backward + operator_name: cudnn_grid_sampler_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: grad_self + name: grad_self + type: at::Tensor + - dynamic_type: at::Tensor + field_name: grad_grid + name: grad_grid + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummax + operator_name: cummax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummax_out + operator_name: cummax + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummax + operator_name: cummax + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cummax_out + operator_name: cummax + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cummax_helper + operator_name: _cummax_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: values + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: void (const at::Tensor &, at::Tensor &, at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: values + type: at::Tensor & + - annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: indices + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummin + operator_name: cummin + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummin_out + operator_name: cummin + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummin + operator_name: cummin + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cummin_out + operator_name: cummin + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cummin_helper + operator_name: _cummin_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: values + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: void (const at::Tensor &, at::Tensor &, at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: values + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cummaxmin_backward + operator_name: cummaxmin_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumprod + operator_name: cumprod + overload_name: '' + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod_ + operator_name: cumprod_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod_out + operator_name: cumprod + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod + operator_name: cumprod + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumprod_ + operator_name: cumprod_ + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumprod_out + operator_name: cumprod + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumprod_backward + operator_name: cumprod_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumsum + operator_name: cumsum + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum_ + operator_name: cumsum_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum_out + operator_name: cumsum + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum + operator_name: cumsum + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumsum_ + operator_name: cumsum_ + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumsum_out + operator_name: cumsum + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumulative_trapezoid + operator_name: cumulative_trapezoid + overload_name: x + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cumulative_trapezoid + operator_name: cumulative_trapezoid + overload_name: dx + manual_kernel_registration: false + category_override: '' + schema_string: aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: dx + type: const at::Scalar & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const 
at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: dx + type: const at::Scalar & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ctc_loss + operator_name: ctc_loss + overload_name: IntList + manual_kernel_registration: false + category_override: '' + schema_string: aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ctc_loss + operator_name: ctc_loss + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const 
at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_lengths + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target_lengths + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_lengths + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target_lengths + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _ctc_loss + operator_name: _ctc_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + default: 0 + 
dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _ctc_loss_backward + operator_name: _ctc_loss_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: neg_log_likelihood + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_alpha + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_probs + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: targets + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: target_lengths + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: neg_log_likelihood + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: log_alpha + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag_embed + operator_name: diag_embed + overload_name: '' + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: -2 + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: -2 + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diagflat + operator_name: diagflat + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diagflat(Tensor self, int offset=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: diagonal + operator_name: diagonal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: 0 + dynamic_type: 
int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_diagonal + operator_name: linalg_diagonal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: offset + type: int64_t + - annotation: null + default: -2 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim1 + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim2 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: offset + type: int64_t + - annotation: null + default: -2 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim1 + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim2 + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: diagonal + operator_name: diagonal + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: outdim + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim1 + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim2 + type: at::Dimname + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: offset + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, at::Dimname, at::Dimname, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: outdim + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim1 + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + 
kwarg_only: true + name: dim2 + type: at::Dimname + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: offset + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: diagonal_backward + operator_name: diagonal_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diagonal_backward(Tensor grad_output, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: fill_diagonal_ + operator_name: fill_diagonal_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: wrap + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: wrap + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: diff + operator_name: diff + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: prepend + type: const c10::optional<at::Tensor> & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: append + type: const c10::optional<at::Tensor> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: prepend + type: const c10::optional<at::Tensor> & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: append + type: const c10::optional<at::Tensor> & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: diff_out + operator_name: diff + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: prepend + type: const c10::optional<at::Tensor> & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: append + type: const c10::optional<at::Tensor> & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: prepend + type: const c10::optional<at::Tensor> & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: append + type: const c10::optional<at::Tensor> & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: scalarint + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int?
dim=None, int edge_order=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + kwarg_only: true + name: spacing + type: const c10::optional<at::Scalar> & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dim + type: c10::optional<int64_t> + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, const c10::optional<at::Scalar> &, c10::optional<int64_t>, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + kwarg_only: true + name: spacing + type: const c10::optional<at::Scalar> & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dim + type: c10::optional<int64_t> + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: scalararray + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: spacing + type: const at::Scalar & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, const at::Scalar &, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: spacing + type: const at::Scalar & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: array + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] + arguments: + - annotation:
null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: scalarrayint + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ArrayRef<at::Scalar> + is_nullable: false + kwarg_only: true + name: spacing + type: at::ArrayRef<at::Scalar> + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dim + type: c10::optional<int64_t> + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::ArrayRef<at::Scalar>, c10::optional<int64_t>, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ArrayRef<at::Scalar> + is_nullable: false + kwarg_only: true + name: spacing + type: at::ArrayRef<at::Scalar> + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dim + type: c10::optional<int64_t> + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: scalarrayarray + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ArrayRef<at::Scalar> + is_nullable: false + kwarg_only: true + name: spacing + type: at::ArrayRef<at::Scalar> + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t +
is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::ArrayRef<at::Scalar>, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ArrayRef<at::Scalar> + is_nullable: false + kwarg_only: true + name: spacing + type: at::ArrayRef<at::Scalar> + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: tensorarrayint + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + kwarg_only: true + name: spacing + type: at::TensorList + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dim + type: c10::optional<int64_t> + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::TensorList, c10::optional<int64_t>, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + kwarg_only: true + name: spacing + type: at::TensorList + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: dim + type: c10::optional<int64_t> + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gradient + operator_name: gradient + overload_name: tensorarray + manual_kernel_registration: false + category_override: '' + schema_string: aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + kwarg_only: true + name: spacing + type: at::TensorList + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + schema_order_cpp_signature: ::std::vector<at::Tensor> (const
at::Tensor &, at::TensorList, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + kwarg_only: true + name: spacing + type: at::TensorList + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + kwarg_only: true + name: dim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: edge_order + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: div + operator_name: div + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::div.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div_ + operator_name: div_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div_out + operator_name: div + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div + operator_name: div + overload_name: Tensor_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div_ + operator_name: div_ + overload_name: Tensor_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div_out + operator_name: div + overload_name: out_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div + operator_name: div + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::div.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div_ + operator_name: div_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div + operator_name: div + overload_name: Scalar_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: div_ + operator_name: div_ + overload_name: Scalar_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: divide + operator_name: divide + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide_ + operator_name: divide_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide_out + operator_name: divide + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide + operator_name: divide + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide_ + operator_name: divide_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide + operator_name: divide + overload_name: Tensor_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? 
rounding_mode) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide_ + operator_name: divide_ + overload_name: Tensor_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide_out + operator_name: divide + overload_name: out_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide + operator_name: divide + overload_name: Scalar_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: divide_ + operator_name: divide_ + overload_name: Scalar_mode + manual_kernel_registration: false + category_override: '' + schema_string: aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>) + schema_order_arguments: + - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: rounding_mode + type: c10::optional<c10::string_view> + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: true_divide + operator_name: true_divide + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: true_divide_ + operator_name: true_divide_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: true_divide_out + operator_name: true_divide + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: true_divide + operator_name: true_divide + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: true_divide_ + operator_name: true_divide_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: dot + operator_name: dot + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::dot(Tensor self, Tensor tensor) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dot_out + operator_name: dot + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: vdot + operator_name: vdot + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::vdot(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: vdot_out + operator_name: vdot + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: einsum + operator_name: einsum + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::einsum(str equation, Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: equation + type: c10::string_view + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (c10::string_view, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: equation + type: c10::string_view + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: embedding + operator_name: embedding + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_backward + operator_name: embedding_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool 
sparse) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: embedding_dense_backward + operator_name: embedding_dense_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_renorm_ + operator_name: 
embedding_renorm_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: max_norm + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: norm_type + type: double + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, double, double) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: max_norm + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: norm_type + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_sparse_backward + operator_name: embedding_sparse_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _embedding_bag_forward_only + operator_name: _embedding_bag_forward_only + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int 
mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _rowwise_prune + operator_name: _rowwise_prune + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: compressed_indices_dtype + type: at::ScalarType + 
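+# Editorial note: the angle-bracketed template parameters were stripped from
+# this dump in transit, so `::std::tuple (...)` signatures should be read as
+# e.g. `::std::tuple<at::Tensor,at::Tensor> (...)`, and bare `c10::optional`
+# types carry the element type named by the neighbouring `dynamic_type`.
+# Multi-return schemas such as _embedding_bag_forward_only's four-Tensor
+# tuple are assumed to come back from the Go wrappers as several values,
+# roughly (illustrative shape, not verbatim)
+#   func ... (*Tensor, *Tensor, *Tensor, *Tensor, error)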
schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: compressed_indices_dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: row_stack + operator_name: row_stack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::row_stack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: row_stack_out + operator_name: row_stack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: embedding_bag + operator_name: embedding_bag + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: embedding_bag + operator_name: embedding_bag + overload_name: padding_idx + manual_kernel_registration: false + category_override: '' + schema_string: aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? 
padding_idx) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: padding_idx + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: padding_idx + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _embedding_bag + operator_name: _embedding_bag + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, int64_t, bool, const c10::optional &, bool, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_backward + operator_name: _embedding_bag_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bag_size + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: maximum_indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, bool, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bag_size + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: maximum_indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _embedding_bag_sparse_backward + operator_name: _embedding_bag_sparse_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool 
scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bag_size + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bag_size + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _embedding_bag_dense_backward + operator_name: _embedding_bag_dense_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bag_size + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: maximum_indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, int64_t, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bag_size + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: maximum_indices + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_per_sample_weights_backward + operator_name: _embedding_bag_per_sample_weights_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + 
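+# Editorial note: throughout the embedding_bag family, `Tensor?` arguments
+# such as per_sample_weights are flagged is_nullable: true and typed
+# `const c10::optional<at::Tensor> &` (brackets stripped above). Passing the
+# missing value from Go is assumed to be a nil *Tensor, e.g. (call shape
+# illustrative, following the schema order, not verbatim)
+#   ts.EmbeddingBag(weight, indices, offsets, false, 0, false, nil, false)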
dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offsets + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: offset2bag + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty + operator_name: empty + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: empty + operator_name: empty + overload_name: memory_format + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_empty + operator_name: new_empty + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_empty_strided + operator_name: new_empty_strided + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_empty_strided(Tensor self, int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_full + operator_name: new_full + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: new_zeros + operator_name: new_zeros + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_zeros(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: new_ones + operator_name: new_ones + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::new_ones(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _empty_affine_quantized + operator_name: _empty_affine_quantized + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? 
memory_format=contiguous_format) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + kwarg_only: true + name: scale + type: double + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: zero_point + type: int64_t + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, double, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + kwarg_only: true + name: scale + type: double + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: zero_point + type: int64_t + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _empty_per_channel_affine_quantized + operator_name: _empty_per_channel_affine_quantized + overload_name: '' + manual_kernel_registration: false + category_override: factory + schema_string: aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=contiguous_format) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: scales + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: zero_points + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: scales + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + kwarg_only: true + name: zero_points + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: resize_ + operator_name: resize_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: a! 
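+# Editorial note: resize_ is typical of the in-place family: the self
+# argument carries the a! annotation, inplace is true, and the op returns
+# the mutated self. The generated Go method is assumed to keep the trailing
+# underscore, something like (signature illustrative, not verbatim)
+#   func (ts *Tensor) Resize_(size []int64) error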
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: const at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_quantized + operator_name: empty_quantized + overload_name: '' + manual_kernel_registration: false + category_override: factory + schema_string: aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qtensor + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: qtensor + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_out + operator_name: empty + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: empty_like + operator_name: empty_like + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_strided + operator_name: empty_strided + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erf + operator_name: erf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::erf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erf_ + operator_name: erf_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::erf_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erf_out + operator_name: erf + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfc + operator_name: erfc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::erfc(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfc_ + operator_name: erfc_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::erfc_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfc_out + operator_name: erfc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp + operator_name: exp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::exp(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp_ + operator_name: exp_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::exp_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp_out + operator_name: exp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp2 + operator_name: exp2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::exp2(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp2_ + operator_name: exp2_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::exp2_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp2_out + operator_name: exp2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expm1 + operator_name: expm1 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::expm1(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expm1_ + operator_name: expm1_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::expm1_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expm1_out + operator_name: expm1 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expand + operator_name: expand + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: implicit + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: implicit + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: expand_as + operator_name: expand_as + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: eye + operator_name: eye + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: eye + operator_name: eye + overload_name: m + manual_kernel_registration: false + category_override: '' + schema_string: aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: eye_out + operator_name: eye + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor & (int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eye_out + operator_name: eye + overload_name: m_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: flatten + operator_name: flatten + overload_name: using_ints + manual_kernel_registration: false + category_override: '' + schema_string: aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: flatten + operator_name: flatten + overload_name: named_out_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::flatten.named_out_dim(Tensor(a) self, int 
start_dim, int end_dim, Dimname out_dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: out_dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, at::Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: out_dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: flatten + operator_name: flatten + overload_name: using_names + manual_kernel_registration: false + category_override: '' + schema_string: aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: start_dim + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: end_dim + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: out_dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, at::Dimname, at::Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: start_dim + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: end_dim + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: out_dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: flatten + operator_name: flatten + overload_name: DimnameList + manual_kernel_registration: false + category_override: '' + schema_string: aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dims + type: at::DimnameList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: out_dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, at::Dimname) + schema_order_arguments: + - 
annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dims + type: at::DimnameList + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: out_dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: unflatten + operator_name: unflatten + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::unflatten.int(Tensor(a) self, int dim, int[] sizes, Dimname[]? names=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::DimnameList + is_nullable: true + name: names + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::DimnameList + is_nullable: true + name: names + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: unflatten + operator_name: unflatten + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: names + type: at::DimnameList + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, at::IntArrayRef, at::DimnameList) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: names + type: at::DimnameList + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: 
result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fill_ + operator_name: fill_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fill_ + operator_name: fill_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor + operator_name: floor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_ + operator_name: floor_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_out + operator_name: floor + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide + operator_name: floor_divide + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide_ + operator_name: floor_divide_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide_out + operator_name: floor_divide + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide + operator_name: floor_divide + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: floor_divide_ + operator_name: floor_divide_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: frac + operator_name: frac + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::frac(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frac_ + operator_name: frac_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::frac_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frac_out + operator_name: frac + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: full + operator_name: full + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: full + operator_name: full + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: full_out + operator_name: full + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: full_like + operator_name: full_like + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: fill_value + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: from_file + operator_name: from_file + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: filename + type: c10::string_view + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: shared + type: c10::optional + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: true + name: size + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (c10::string_view, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: filename + type: c10::string_view + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: shared + type: c10::optional + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: true + name: size + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gcd_out + operator_name: gcd + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gcd + operator_name: gcd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::gcd(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gcd_ + operator_name: gcd_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lcm_out + operator_name: lcm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lcm + operator_name: lcm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::lcm(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lcm_ + operator_name: lcm_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler + operator_name: grid_sampler + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: grid_sampler_2d + operator_name: grid_sampler_2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + 
dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler_2d_backward + operator_name: grid_sampler_2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _grid_sampler_2d_cpu_fallback + operator_name: _grid_sampler_2d_cpu_fallback + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + 
dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _grid_sampler_2d_cpu_fallback_backward + operator_name: _grid_sampler_2d_cpu_fallback_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: grid_sampler_3d + operator_name: grid_sampler_3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int 
padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler_3d_backward + operator_name: grid_sampler_3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grid + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false 
+ is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hann_window + operator_name: hann_window + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hann_window + operator_name: hann_window + overload_name: periodic + manual_kernel_registration: false + category_override: '' + schema_string: aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hamming_window + operator_name: hamming_window + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hamming_window + operator_name: hamming_window + overload_name: periodic + manual_kernel_registration: false + category_override: '' + schema_string: aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hamming_window + operator_name: hamming_window + overload_name: periodic_alpha + manual_kernel_registration: false + category_override: '' + schema_string: aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hamming_window + operator_name: hamming_window + overload_name: periodic_alpha_beta + manual_kernel_registration: false + category_override: '' + schema_string: aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: kaiser_window + operator_name: kaiser_window + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: kaiser_window + operator_name: kaiser_window + overload_name: periodic + manual_kernel_registration: false + category_override: '' + schema_string: aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: kaiser_window + operator_name: kaiser_window + overload_name: beta + manual_kernel_registration: false + category_override: '' + schema_string: aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, 
ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hinge_embedding_loss + operator_name: hinge_embedding_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: group_norm + operator_name: group_norm + 
overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_groups + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const c10::optional &, const c10::optional &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_groups + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: native_group_norm + operator_name: native_group_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_group_norm(Tensor input, Tensor? weight, Tensor? 
bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: native_group_norm_backward + operator_name: native_group_norm_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? 
weight, int N, int C, int HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: rstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, int64_t, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: rstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_r2c + operator_name: _fft_r2c + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, 
at::IntArrayRef, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_r2c_out + operator_name: _fft_r2c + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_c2r + operator_name: _fft_c2r + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: last_dim_size + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: last_dim_size + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_c2r_out + operator_name: _fft_c2r + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: last_dim_size + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: last_dim_size + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_c2c + operator_name: _fft_c2c + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fft_c2c(Tensor self, int[] dim, int normalization, bool forward) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: forward + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: forward + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_c2c_out + operator_name: _fft_c2c + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: forward + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: forward + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cufft_get_plan_cache_size + operator_name: _cufft_get_plan_cache_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cufft_get_plan_cache_size(int device_index) -> int + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + schema_order_cpp_signature: int64_t (int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cufft_get_plan_cache_max_size + operator_name: _cufft_get_plan_cache_max_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cufft_get_plan_cache_max_size(int device_index) -> int + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + schema_order_cpp_signature: int64_t (int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cufft_set_plan_cache_max_size + operator_name: _cufft_set_plan_cache_max_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> () + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_size + type: int64_t + schema_order_cpp_signature: void (int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_size + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _cufft_clear_plan_cache + operator_name: _cufft_clear_plan_cache + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cufft_clear_plan_cache(int device_index) -> () + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + schema_order_cpp_signature: void (int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + method_of: + - Type + - 
namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index + operator_name: index + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy_ + operator_name: index_copy_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy + operator_name: index_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy_ + operator_name: index_copy_ + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_copy + operator_name: index_copy + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_put_ + operator_name: index_put_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_put + operator_name: index_put + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _index_put_impl_ + operator_name: _index_put_impl_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unsafe + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const c10::List<c10::optional<at::Tensor>> &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const c10::List<c10::optional<Tensor>> & + is_nullable: true + name: indices + type: const c10::List<c10::optional<at::Tensor>> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unsafe + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: instance_norm + operator_name: instance_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: bool + is_nullable: false + name: use_input_stats + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional<at::Tensor> & + - annotation: null + dynamic_type: bool + is_nullable: false + name: use_input_stats + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated:
false + has_math_kernel: true +- name: inverse + operator_name: inverse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::inverse(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: inverse_out + operator_name: inverse + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isclose + operator_name: isclose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: 
result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: isin_out + operator_name: isin + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isin + operator_name: isin + overload_name: Tensor_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + method_of: + - 
Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isin_out + operator_name: isin + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: test_element + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: test_element + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isin + operator_name: isin + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: test_element + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: elements + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: test_element + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isin_out + operator_name: isin + overload_name: Scalar_Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: element + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: element + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isin + operator_name: isin + overload_name: Scalar_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: element + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: element + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: test_elements + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: assume_unique + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: invert + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isnan + operator_name: isnan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::isnan(Tensor self) -> Tensor + arguments: + - annotation: null + 
dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_distributed + operator_name: is_distributed + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_distributed(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_floating_point + operator_name: is_floating_point + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_floating_point(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_complex + operator_name: is_complex + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_complex(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_conj + operator_name: is_conj + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_conj(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + 
returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: _is_zerotensor + operator_name: _is_zerotensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_is_zerotensor(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_neg + operator_name: is_neg + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_neg(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: isreal + operator_name: isreal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::isreal(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_nonzero + operator_name: is_nonzero + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_nonzero(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_same_size + operator_name: is_same_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_same_size(Tensor self, Tensor other) -> 
bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_signed + operator_name: is_signed + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_signed(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_inference + operator_name: is_inference + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_inference(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: kl_div + operator_name: kl_div + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + 
dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kl_div_backward + operator_name: kl_div_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kron + operator_name: kron + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::kron(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: kron_out + operator_name: kron + overload_name: out + manual_kernel_registration: 
false + category_override: '' + schema_string: aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: kthvalue + operator_name: kthvalue + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kthvalue_out + operator_name: kthvalue + overload_name: values + manual_kernel_registration: false + category_override: '' + schema_string: aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kthvalue + operator_name: kthvalue + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: kthvalue_out + operator_name: kthvalue + overload_name: dimname_out + manual_kernel_registration: false + 
+  category_override: ''
+  schema_string: aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: k
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, at::Dimname, bool, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: k
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: values
+    name: values
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    field_name: indices
+    name: indices
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: layer_norm
+  operator_name: layer_norm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: normalized_shape
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1.0e-05
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    name: cudnn_enable
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: normalized_shape
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1.0e-05
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  - annotation: null
+    default: true
+    dynamic_type: bool
+    is_nullable: false
+    name: cudnn_enable
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: native_layer_norm
+  operator_name: native_layer_norm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: normalized_shape
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, double)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: normalized_shape
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result2
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _native_multi_head_self_attention
+  operator_name: _native_multi_head_self_attention
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_native_multi_head_self_attention(Tensor query, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: query
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_bias
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_bias
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: mask
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: query
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: qkv_bias
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: proj_bias
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: mask
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: native_layer_norm_backward
+  operator_name: native_layer_norm_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::native_layer_norm_backward(Tensor grad_out, Tensor input, int[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_out
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: normalized_shape
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mean
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: rstd
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: ::std::array<bool,3>
+    is_nullable: false
+    name: output_mask
+    type: ::std::array<bool,3>
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, ::std::array<bool,3>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_out
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: normalized_shape
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mean
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: rstd
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: ::std::array<bool,3>
+    is_nullable: false
+    name: output_mask
+    type: ::std::array<bool,3>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result2
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nan_to_num
+  operator_name: nan_to_num
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: nan
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: posinf
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: neginf
+    type: c10::optional<double>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<double>, c10::optional<double>, c10::optional<double>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: nan
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: posinf
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: neginf
+    type: c10::optional<double>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nan_to_num_
+  operator_name: nan_to_num_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: nan
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: posinf
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: neginf
+    type: c10::optional<double>
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional<double>, c10::optional<double>, c10::optional<double>)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: nan
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: posinf
+    type: c10::optional<double>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: neginf
+    type: c10::optional<double>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nan_to_num_out
+  operator_name: nan_to_num
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: nan + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: posinf + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: neginf + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: nan + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: posinf + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: neginf + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linear + operator_name: linear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linear_out + operator_name: linear + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mkldnn_linear + operator_name: mkldnn_linear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_linear_backward_input + operator_name: mkldnn_linear_backward_input + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + 
name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_linear_backward_weights + operator_name: mkldnn_linear_backward_weights + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_defined + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_defined + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_linear_backward + operator_name: mkldnn_linear_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: 
output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_linear_int8_weight_fp32_activation + operator_name: fbgemm_linear_int8_weight_fp32_activation + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_offsets + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_scale + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_zero_point + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_offsets + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_scale + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_zero_point + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_linear_int8_weight + operator_name: fbgemm_linear_int8_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - 
annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_offsets + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_scale + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_zero_point + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_offsets + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_scale + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight_zero_point + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_linear_quantize_weight + operator_name: fbgemm_linear_quantize_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: double + name: result2 + type: double + - dynamic_type: int64_t + name: result3 + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_pack_gemm_matrix_fp16 + operator_name: fbgemm_pack_gemm_matrix_fp16 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + method_of: + - Type + - namespace + 
mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_linear_fp16_weight_fp32_activation + operator_name: fbgemm_linear_fp16_weight_fp32_activation + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_linear_fp16_weight + operator_name: fbgemm_linear_fp16_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: packed_weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: bias + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_pack_quantized_matrix + operator_name: fbgemm_pack_quantized_matrix + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + 
is_nullable: false + name: input + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fbgemm_pack_quantized_matrix + operator_name: fbgemm_pack_quantized_matrix + overload_name: KN + manual_kernel_registration: false + category_override: '' + schema_string: aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: K + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: K + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ldexp + operator_name: ldexp + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ldexp_ + operator_name: ldexp_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ldexp_out + operator_name: ldexp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linspace + operator_name: linspace + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linspace_out + operator_name: linspace + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: steps + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log + operator_name: log + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_ + operator_name: log_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_out + operator_name: log + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log10 + operator_name: log10 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log10(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log10_ + operator_name: log10_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log10_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log10_out + operator_name: log10 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log1p + operator_name: log1p + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log1p(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log1p_ + operator_name: log1p_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log1p_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log1p_out + operator_name: log1p + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log2 + operator_name: log2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log2(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log2_ + operator_name: log2_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log2_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log2_out + operator_name: log2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp_out + operator_name: logaddexp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp + operator_name: logaddexp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logaddexp(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp2_out + operator_name: logaddexp2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp2 + operator_name: logaddexp2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logaddexp2(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy + operator_name: xlogy + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy + operator_name: xlogy + overload_name: Scalar_Self + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + 
has_math_kernel: false +- name: xlogy + operator_name: xlogy + overload_name: Scalar_Other + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy_ + operator_name: xlogy_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy_ + operator_name: xlogy_ + overload_name: Scalar_Other + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy_out + operator_name: xlogy + overload_name: OutTensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy_out + operator_name: xlogy + overload_name: OutScalar_Self + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: xlogy_out + operator_name: xlogy + overload_name: OutScalar_Other + manual_kernel_registration: false + category_override: '' + schema_string: aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: logdet
+  operator_name: logdet
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::logdet(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: logspace
+  operator_name: logspace
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: steps
+    type: int64_t
+  - annotation: null
+    default: 10.0
+    dynamic_type: double
+    is_nullable: false
+    name: base
+    type: double
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, int64_t, double, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: steps
+    type: int64_t
+  - annotation: null
+    default: 10.0
+    dynamic_type: double
+    is_nullable: false
+    name: base
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: logspace_out
+  operator_name: logspace
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: steps
+    type: int64_t
+  - annotation: null
+    default: 10.0
+    dynamic_type: double
+    is_nullable: false
+    name: base
+    type: double
+  schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, int64_t, double, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: start
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: end
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: steps
+    type: int64_t
+  - annotation: null
+    default: 10.0
+    dynamic_type: double
+    is_nullable: false
+    name: base
+    type: double
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: log_softmax
+  operator_name: log_softmax
+  overload_name: int
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: log_softmax
+  operator_name: log_softmax
+  overload_name: Dimname
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _log_softmax
+  operator_name: _log_softmax
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: half_to_float
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: half_to_float
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _log_softmax_out
+  operator_name: _log_softmax
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _log_softmax_backward_data + operator_name: _log_softmax_backward_data + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _log_softmax_backward_data_out + operator_name: _log_softmax_backward_data + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::ScalarType, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _logcumsumexp + operator_name: _logcumsumexp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_logcumsumexp(Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _logcumsumexp_out + operator_name: _logcumsumexp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp + operator_name: logcumsumexp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logcumsumexp(Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp_out + operator_name: logcumsumexp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp + operator_name: logcumsumexp + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: logcumsumexp_out + operator_name: logcumsumexp + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: logsumexp + operator_name: logsumexp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logsumexp_out + operator_name: logsumexp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logsumexp + operator_name: logsumexp + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: logsumexp_out + operator_name: logsumexp + overload_name: names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: margin_ranking_loss + operator_name: margin_ranking_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matmul + operator_name: matmul + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::matmul(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matmul_out + operator_name: matmul + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matrix_rank + operator_name: matrix_rank + overload_name: tol + manual_kernel_registration: false + category_override: '' + schema_string: aten::matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matrix_rank + operator_name: matrix_rank + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::matrix_rank(Tensor self, bool symmetric=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matrix_power + operator_name: matrix_power + overload_name: '' + manual_kernel_registration: false + 
category_override: '' + schema_string: aten::matrix_power(Tensor self, int n) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matrix_power_out + operator_name: matrix_power + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: matrix_exp
+  operator_name: matrix_exp
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::matrix_exp(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: matrix_exp_backward
+  operator_name: matrix_exp_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _aminmax
+  operator_name: _aminmax
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_aminmax(Tensor self) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _aminmax
+  operator_name: _aminmax
+  overload_name: dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: aminmax
+  operator_name: aminmax
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    kwarg_only: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::optional<int64_t>, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    kwarg_only: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: min
+    name: min
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: max
+    name: max
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: aminmax_out
+  operator_name: aminmax
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: min
+    is_nullable: false
+    name: min
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: max
+    is_nullable: false
+    name: max
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    kwarg_only: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::optional<int64_t>, bool, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    kwarg_only: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: min
+    is_nullable: false
+    name: min
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: max
+    is_nullable: false
+    name: max
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: min
+    name: min
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    field_name: max
+    name: max
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _compute_linear_combination
+  operator_name: _compute_linear_combination
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: coefficients
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: coefficients
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _compute_linear_combination_out
+  operator_name: _compute_linear_combination
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: coefficients
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: coefficients
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: max
+  operator_name: max
+  overload_name: dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: values
+    name: values
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: indices
+    name: indices
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: max_out
+  operator_name: max
+  overload_name: dim_max
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: max
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: max + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: max + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: max_values + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max + operator_name: max + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: max_out + operator_name: max + overload_name: names_dim_max + manual_kernel_registration: false + category_override: '' + schema_string: aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: max + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
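+# The names_dim and names_dim_max overloads differ from the ones above only in
+# taking at::Dimname instead of int64_t. Dimname has no Go-side representation,
+# so these overloads are presumably dropped by the generator rather than
+# wrapped (an assumption from gen.ml's supported argument types); only the
+# integer-dim overloads are expected to surface in the Go API.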
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: max + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: max + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: max_values + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: value_selecting_reduction_backward + operator_name: value_selecting_reduction_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: amax + operator_name: amax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: 
aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: amax_out + operator_name: amax + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
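+# In amax, `int[1] dim=[]` (YAML default '{}') maps naturally onto a plain Go
+# slice, where an empty slice means "reduce over every dimension". Sketch with
+# assumed, unverified naming:
+#
+#   // Amax wraps aten::amax(Tensor self, int[1] dim=[], bool keepdim=False)
+#   func (ts *Tensor) Amax(dim []int64, keepdim bool, del bool) (*Tensor, error)
+#
+#   flat, err := x.Amax([]int64{}, false, false) // global max over all dims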
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool1d_with_indices + operator_name: max_pool1d_with_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: max_pool1d + operator_name: max_pool1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 
1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: max_pool2d + operator_name: max_pool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + 
- dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mkldnn_max_pool2d + operator_name: mkldnn_max_pool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_max_pool2d_backward + operator_name: mkldnn_max_pool2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + 
is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_max_pool3d + operator_name: mkldnn_max_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + 
- annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_max_pool3d_backward + operator_name: mkldnn_max_pool3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_max_pool1d + operator_name: quantized_max_pool1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: 
aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 1 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 1 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_max_pool2d + operator_name: quantized_max_pool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + 
is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d + operator_name: max_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mean + operator_name: mean + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean(Tensor self, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean + operator_name: mean + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean_out + operator_name: mean + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
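+# mean and mean.dim carry an optional `ScalarType? dtype=None` (default
+# c10::nullopt) that selects the accumulation/output dtype. On the Go side this
+# would plausibly surface as a gotch.DType parameter -- a sketch, with the
+# names assumed rather than taken from the generated bindings:
+#
+#   // MeanDim wraps aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False,
+#   //   *, ScalarType? dtype=None)
+#   func (ts *Tensor) MeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool) (*Tensor, error)
+#
+#   m, err := x.MeanDim([]int64{0}, false, gotch.Float, false)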
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean + operator_name: mean + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mean_out + operator_name: mean + overload_name: names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nanmean + operator_name: nanmean + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmean(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nanmean_out + operator_name: nanmean + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmean.out(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
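+# nanmean shares mean.dim's parameter list; the only difference is semantic --
+# NaN entries are excluded from both the sum and the element count -- so the
+# MeanDim sketch above applies with the name swapped (e.g. Nanmean).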
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: median + operator_name: median + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::median(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median + operator_name: median + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median_out + operator_name: median + overload_name: dim_values + manual_kernel_registration: false + category_override: '' + schema_string: aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
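+# median.dim follows the same (values, indices) pattern as max.dim, while the
+# dim_values out-variant instead fills two caller-allocated tensors (the a!/b!
+# annotations on the output arguments). Assumed usage for the functional form:
+#
+#   values, indices, err := x.MedianDim(1, true, false) // dim=1, keepdim=true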
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median + operator_name: median + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: median_out + operator_name: median + overload_name: names_dim_values + manual_kernel_registration: false + category_override: '' + schema_string: aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nanmedian + operator_name: nanmedian + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmedian(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanmedian + operator_name: nanmedian + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: 
values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanmedian_out + operator_name: nanmedian + overload_name: dim_values + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanmedian + operator_name: nanmedian + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nanmedian_out + operator_name: nanmedian + overload_name: names_dim_values + manual_kernel_registration: false + category_override: '' + schema_string: aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
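+ # editor's note (not generator output): the names_dim* overloads repeat the
+ # dim* schema with dim typed as at::Dimname (named-tensor axis) instead of
+ # int64_t; they are abstract: false with has_math_kernel: true, i.e. served
+ # by a shared composite ("math") kernel rather than per-backend kernels.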
+ dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: min + operator_name: min + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min_out + operator_name: min + overload_name: dim_min + manual_kernel_registration: false + category_override: '' + schema_string: aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: min + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
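+ # editor's note (not generator output): field_name below names the returned
+ # tuple slot (values/indices) while name is the actual out-parameter
+ # (min/min_indices) from aten::min.dim_min — the two need not coincide.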
+ dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: min + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: min + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: min_indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min + operator_name: min + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: min_out + operator_name: min + overload_name: names_dim_min + manual_kernel_registration: false + category_override: '' + schema_string: aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: min + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
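+ # editor's note (not generator output): default: false on keepdim above
+ # encodes keepdim=False from schema_string; defaults are attached per
+ # argument so binding generators such as gen/gen.ml can reproduce them.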
+ dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: min + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: min + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: min_indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: amin + operator_name: amin + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: amin_out + operator_name: amin + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
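+ # editor's note (not generator output): amin declares dim as int[1] with
+ # default [] — hence size: 1 and default: '{}' on the at::IntArrayRef
+ # argument above.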
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_convolution + operator_name: mkldnn_convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_batch_norm + operator_name: miopen_batch_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: exponential_average_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: exponential_average_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_batch_norm_backward + operator_name: miopen_batch_norm_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, float epsilon) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution + operator_name: miopen_convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_transpose + operator_name: miopen_convolution_transpose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_depthwise_convolution + operator_name: miopen_depthwise_convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_rnn + operator_name: miopen_rnn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: 
result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_rnn_backward + operator_name: miopen_rnn_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_buf + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_output + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional &, const at::Tensor &, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: 
input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: weight + type: at::TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight_buf + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: cx + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_output + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: batch_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: dropout_state + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: reserve + type: const at::Tensor & + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::TensorList + name: result3 + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mm + operator_name: mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mm(Tensor self, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor 
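+ # editor's note (not generator output): unnamed results are reported as
+ # result (or result0..resultN for tuples, as in miopen_rnn above); only
+ # named tuple returns carry a field_name such as values/indices.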
+ inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mm_out + operator_name: mm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_mm + operator_name: _sparse_mm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sparse + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sparse + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dense + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _sparse_sparse_matmul + operator_name: _sparse_sparse_matmul + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + 
with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_mask_helper + operator_name: _sparse_mask_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: t + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask_indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: t + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask_indices + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode + operator_name: mode + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode_out + operator_name: mode + overload_name: values + manual_kernel_registration: false + category_override: '' + schema_string: aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode + operator_name: mode + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mode_out + operator_name: mode + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mul + operator_name: mul + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::mul.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul_ + operator_name: mul_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
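+ # editor's note (not generator output): for in-place variants such as mul_,
+ # self itself carries the a! annotation (mutated and returned), inplace:
+ # true is set, and method_of omits namespace — the op is exposed only as a
+ # Tensor method.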
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul_out + operator_name: mul + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul + operator_name: mul + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::mul.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul_ + operator_name: mul_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multiply + operator_name: multiply + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: multiply_ + operator_name: multiply_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: multiply_out + operator_name: multiply + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: multiply + operator_name: multiply + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: multiply_ + operator_name: multiply_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mv + operator_name: mv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mv(Tensor self, Tensor vec) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mv_out + operator_name: mv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mvlgamma_out + operator_name: mvlgamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mvlgamma + operator_name: mvlgamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mvlgamma(Tensor self, int p) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mvlgamma_ + operator_name: mvlgamma_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: narrow_copy + operator_name: narrow_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::narrow_copy(Tensor self, int dim, int start, int length) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: narrow_copy_out + operator_name: narrow_copy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::narrow_copy.out(Tensor self, int dim, int start, int length, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: narrow + operator_name: narrow + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: narrow + operator_name: narrow + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: start + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: native_batch_norm + operator_name: native_batch_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: running_mean
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: running_var
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: training
+    type: bool
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: momentum
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, double, double)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: running_mean
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: running_var
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: training
+    type: bool
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: momentum
+    type: double
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result2
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: native_batch_norm_out
+  operator_name: native_batch_norm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: save_mean
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: c!
+ dynamic_type: at::Tensor + is_nullable: false + name: save_invstd + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: save_mean + output: true + type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + is_nullable: false + name: save_invstd + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + - dynamic_type: at::Tensor + name: save_mean + type: at::Tensor & + - dynamic_type: at::Tensor + name: save_invstd + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_stats + operator_name: batch_norm_stats + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_elemt + operator_name: batch_norm_elemt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor invstd, float eps) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_elemt_out + operator_name: batch_norm_elemt + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_gather_stats + operator_name: batch_norm_gather_stats + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? 
running_var, float momentum, float eps, int count) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: count + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: count + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_gather_stats_with_counts + operator_name: batch_norm_gather_stats_with_counts + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? 
running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: counts + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, double, double, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: counts + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_batch_norm_backward + operator_name: native_batch_norm_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_invstd + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, ::std::array) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: save_invstd + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: ::std::array + is_nullable: false + name: output_mask + type: ::std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_backward_reduce + operator_name: batch_norm_backward_reduce + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: input_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: weight_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_g + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: bool + is_nullable: false + name: input_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: weight_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_g + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_backward_elemt + operator_name: batch_norm_backward_elemt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean_dy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean_dy_xmu + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: count + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: invstd + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean_dy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean_dy_xmu + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: count + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_update_stats + operator_name: batch_norm_update_stats + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_mean + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: running_var + type: const c10::optional & + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_vulkan_available + operator_name: is_vulkan_available + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_vulkan_available() -> bool + arguments: [] + schema_order_cpp_signature: bool () + schema_order_arguments: [] + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _nnpack_available + operator_name: _nnpack_available + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nnpack_available() -> bool + arguments: [] + schema_order_cpp_signature: bool () + schema_order_arguments: [] + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _nnpack_spatial_convolution + operator_name: _nnpack_spatial_convolution + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[2] padding, int[2] stride=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ones + operator_name: ones + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: true
+    kwarg_only: true
+    name: names
+    type: c10::optional<at::DimnameList>
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional<at::DimnameList>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: true
+    kwarg_only: true
+    name: names
+    type: c10::optional<at::DimnameList>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: ones
+  operator_name: ones
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: ones_out
+  operator_name: ones
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: ones_like
+  operator_name: ones_like
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::MemoryFormat
+    is_nullable: true
+    kwarg_only: true
+    name: memory_format
+    type: c10::optional<at::MemoryFormat>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<at::MemoryFormat>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::MemoryFormat
+    is_nullable: true
+    kwarg_only: true
+    name: memory_format
+    type: c10::optional<at::MemoryFormat>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: pairwise_distance
+  operator_name: pairwise_distance
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    default: 1.0e-06
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, double, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    default: 1.0e-06
+    dynamic_type: double
+    is_nullable: false
+    name: eps
+    type: double
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: cdist
+  operator_name: cdist
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: compute_mode
+    type: c10::optional<int64_t>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional<int64_t>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: compute_mode
+    type: c10::optional<int64_t>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _euclidean_dist
+  operator_name: _euclidean_dist
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _cdist_forward
+  operator_name: _cdist_forward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: true
+    name: compute_mode
+    type: c10::optional<int64_t>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional<int64_t>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: true
+    name: compute_mode
+    type: c10::optional<int64_t>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _cdist_backward
+  operator_name: _cdist_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: cdist
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: x2
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: cdist
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: pdist
+  operator_name: pdist
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::pdist(Tensor self, float p=2) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: double
+    is_nullable: false
+    name: p
+    type: double
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, double)
+  schema_order_arguments:
+  - annotation: null
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _pdist_forward + operator_name: _pdist_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_pdist_forward(Tensor self, float p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _pdist_backward + operator_name: _pdist_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: pdist + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, double, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: pdist + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cosine_similarity + operator_name: cosine_similarity + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + 
is_nullable: false + name: dim + type: int64_t + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: permute + operator_name: permute + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: movedim + operator_name: movedim + overload_name: intlist + manual_kernel_registration: false + category_override: '' + schema_string: aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: source + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: destination + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: source + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: destination + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: movedim + operator_name: movedim + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: 
aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: source + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: destination + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: source + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: destination + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: moveaxis + operator_name: moveaxis + overload_name: intlist + manual_kernel_registration: false + category_override: '' + schema_string: aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: source + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: destination + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: source + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: destination + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: moveaxis + operator_name: moveaxis + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: source + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: destination + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: source + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: destination + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + 
inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: numpy_T + operator_name: numpy_T + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::numpy_T(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: matrix_H + operator_name: matrix_H + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::matrix_H(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mT + operator_name: mT + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mT(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mH + operator_name: mH + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mH(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: adjoint + operator_name: adjoint + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adjoint(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: pixel_shuffle + operator_name: pixel_shuffle + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: upscale_factor + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: upscale_factor + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: pixel_unshuffle + operator_name: pixel_unshuffle + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: downscale_factor + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: downscale_factor + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: channel_shuffle + operator_name: channel_shuffle + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::channel_shuffle(Tensor self, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + 
with_gil: false + deprecated: false + has_math_kernel: false +- name: native_channel_shuffle + operator_name: native_channel_shuffle + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::native_channel_shuffle(Tensor self, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: is_pinned + operator_name: is_pinned + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_pinned(Tensor self, Device? device=None) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + name: device + type: c10::optional + schema_order_cpp_signature: bool (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + name: device + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pin_memory + operator_name: pin_memory + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + name: device + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + name: device + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _pin_memory + operator_name: _pin_memory + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_pin_memory(Tensor self, Device? 
device=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + name: device + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + name: device + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pinverse + operator_name: pinverse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1.0e-15 + dynamic_type: double + is_nullable: false + name: rcond + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1.0e-15 + dynamic_type: double + is_nullable: false + name: rcond + type: double + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: poisson_nll_loss + operator_name: poisson_nll_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: log_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: full + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: log_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: full + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction 
+ type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rad2deg + operator_name: rad2deg + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rad2deg(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rad2deg_ + operator_name: rad2deg_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rad2deg_out + operator_name: rad2deg + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: deg2rad + operator_name: deg2rad + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::deg2rad(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: deg2rad_ + operator_name: deg2rad_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::deg2rad_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: deg2rad_out + operator_name: deg2rad + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scalar_tensor + operator_name: scalar_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: s + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: s + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand + operator_name: rand + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand + operator_name: rand + overload_name: generator_with_names + manual_kernel_registration: false + category_override: 
'' + schema_string: aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand + operator_name: rand + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand + operator_name: rand + overload_name: generator + manual_kernel_registration: false + category_override: '' + schema_string: aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand_out + operator_name: rand + overload_name: out + manual_kernel_registration: false + category_override: '' + 
schema_string: aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand_out + operator_name: rand + overload_name: generator_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rand_like + operator_name: rand_like + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint + operator_name: randint + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint + operator_name: randint + overload_name: generator + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint + operator_name: randint + overload_name: low + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint + operator_name: randint + overload_name: low_generator + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint_out + operator_name: randint + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (int64_t, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint_out + operator_name: randint + overload_name: generator_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (int64_t, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint_out + operator_name: randint + overload_name: low_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint_out + operator_name: randint + overload_name: low_generator_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (int64_t, int64_t, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint_like + operator_name: randint_like + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randint_like + operator_name: randint_like + overload_name: low_dtype + manual_kernel_registration: false + category_override: '' + schema_string: aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: low + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: high + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn + operator_name: randn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn + operator_name: randn + overload_name: generator + manual_kernel_registration: false + category_override: '' + schema_string: aten::randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn + operator_name: randn + overload_name: names + manual_kernel_registration: false + category_override: '' + 
schema_string: aten::randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn + operator_name: randn + overload_name: generator_with_names + manual_kernel_registration: false + category_override: '' + schema_string: aten::randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn_out + operator_name: randn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn_out + operator_name: randn + overload_name: generator_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randn_like + operator_name: randn_like + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randperm + operator_name: randperm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randperm + operator_name: randperm + overload_name: generator + manual_kernel_registration: false + category_override: '' + schema_string: aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: at::kLong + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: at::kLong + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randperm_out + operator_name: randperm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor & (int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: randperm_out + operator_name: randperm + overload_name: generator_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: range + operator_name: range + overload_name: step + manual_kernel_registration: false + category_override: '' + schema_string: aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: step + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: step + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: range + operator_name: range + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Scalar &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: range_out + operator_name: range + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: step + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: start + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: end + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: step + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ravel + operator_name: ravel + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::ravel(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: reciprocal + operator_name: reciprocal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reciprocal(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reciprocal_ + operator_name: reciprocal_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reciprocal_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reciprocal_out + operator_name: reciprocal + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: neg + operator_name: neg + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::neg(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: neg_ + operator_name: neg_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::neg_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: neg_out + operator_name: neg + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: negative + operator_name: negative + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::negative(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: negative_ + operator_name: negative_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::negative_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: negative_out + operator_name: negative + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: repeat + operator_name: repeat + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::repeat(Tensor self, int[] repeats) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: repeats + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: repeats + type: at::IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: repeat_interleave + operator_name: repeat_interleave + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: repeats + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: output_size + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: repeats + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: output_size + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: repeat_interleave + operator_name: repeat_interleave + overload_name: self_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? 
output_size=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: repeats + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: output_size + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: repeats + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: output_size + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: repeat_interleave + operator_name: repeat_interleave + overload_name: self_int + manual_kernel_registration: false + category_override: '' + schema_string: aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: repeats + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: output_size + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: repeats + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: output_size + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: reshape + operator_name: reshape + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reshape(Tensor(a) self, int[] shape) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: shape + type: 
at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: shape + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: _reshape_alias + operator_name: _reshape_alias + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_reshape_alias(Tensor(a) self, int[] size, int[] stride) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mkldnn_reshape + operator_name: _mkldnn_reshape + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: shape + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: shape + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: reshape_as + operator_name: reshape_as + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + 
schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: round + operator_name: round + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::round(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: round_ + operator_name: round_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::round_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: round_out + operator_name: round + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: round + operator_name: round + overload_name: decimals + manual_kernel_registration: false + category_override: '' + schema_string: aten::round.decimals(Tensor self, *, int decimals) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: round_ + operator_name: round_ + overload_name: decimals + manual_kernel_registration: false + category_override: '' + schema_string: aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: round_out + operator_name: round + overload_name: decimals_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rrelu + operator_name: rrelu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.125 + dynamic_type: const at::Scalar & + is_nullable: false + name: lower + type: const at::Scalar & + - annotation: null + default: 0.3333333333333333 + dynamic_type: const at::Scalar & + is_nullable: false + name: upper + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.125 + dynamic_type: const at::Scalar & + is_nullable: false + name: lower + type: const at::Scalar & + - annotation: null + default: 0.3333333333333333 + dynamic_type: const at::Scalar & + is_nullable: false + name: upper + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rrelu_ + operator_name: rrelu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0.125 + dynamic_type: const at::Scalar & + is_nullable: false + name: lower + type: const at::Scalar & + - annotation: null + default: 0.3333333333333333 + dynamic_type: const at::Scalar & + is_nullable: false + name: upper + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0.125 + dynamic_type: const at::Scalar & + is_nullable: false + name: lower + type: const at::Scalar & + - annotation: null + default: 0.3333333333333333 + dynamic_type: const at::Scalar & + is_nullable: false + name: upper + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: relu + operator_name: relu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::relu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: relu_ + operator_name: relu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::relu_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: relu6 + operator_name: relu6 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::relu6(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: relu6_ + operator_name: relu6_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::relu6_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: prelu + operator_name: prelu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::prelu(Tensor self, Tensor weight) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: prelu_backward + operator_name: prelu_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gelu_out + operator_name: gelu + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::gelu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gelu + operator_name: gelu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::gelu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gelu_backward_out + operator_name: gelu_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::gelu_backward.grad_input(Tensor grad, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gelu_backward + operator_name: gelu_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::gelu_backward(Tensor grad, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: infinitely_differentiable_gelu_backward + operator_name: infinitely_differentiable_gelu_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: hardshrink_out + operator_name: hardshrink + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardshrink + operator_name: hardshrink + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardshrink_backward_out + operator_name: hardshrink_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardshrink_backward + operator_name: hardshrink_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_out + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsqrt + operator_name: rsqrt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rsqrt(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsqrt_ + operator_name: rsqrt_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsqrt_out + operator_name: rsqrt + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: select + operator_name: select + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: select + operator_name: select + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: select_backward + operator_name: select_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::select_backward(Tensor grad_output, 
int[] input_sizes, int dim, int index) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: selu + operator_name: selu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::selu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: selu_ + operator_name: selu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::selu_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: celu + operator_name: celu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: celu_ + operator_name: celu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu + operator_name: silu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::silu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_ + operator_name: silu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::silu_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_out + operator_name: silu + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_backward_out + operator_name: silu_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_backward + operator_name: silu_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mish + operator_name: mish + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mish(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mish_ + operator_name: mish_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mish_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mish_out + operator_name: mish + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mish_backward + operator_name: mish_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sigmoid + operator_name: sigmoid + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sigmoid(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sigmoid_ + operator_name: sigmoid_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sigmoid_out + operator_name: sigmoid + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit + operator_name: logit + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logit(Tensor self, float? eps=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit_ + operator_name: logit_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit_out + operator_name: logit + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sin + operator_name: sin + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sin(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sin_ + operator_name: sin_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sin_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sin_out + operator_name: sin + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinc + operator_name: sinc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sinc(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinc_ + operator_name: sinc_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sinc_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinc_out + operator_name: sinc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinh + operator_name: sinh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sinh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinh_ + operator_name: sinh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sinh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinh_out + operator_name: sinh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: detach + operator_name: detach + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::detach(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: detach_ + operator_name: detach_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::detach_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: size + operator_name: size + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::size.int(Tensor self, int dim) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: int64_t (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: size + operator_name: size + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::size.Dimname(Tensor self, Dimname dim) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: int64_t (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const 
at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: slice + operator_name: slice + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: c10::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: c10::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_backward + operator_name: slice_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_backward(Tensor grad_output, int[] input_sizes, int dim, int start, int end, int step) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + 
is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_scatter + operator_name: slice_scatter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slice_scatter(Tensor self, Tensor src, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: c10::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, c10::optional, c10::optional, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: start + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: end + type: c10::optional + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: select_scatter + operator_name: select_scatter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: 
false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: diagonal_scatter + operator_name: diagonal_scatter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: dim2 + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slogdet + operator_name: slogdet + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: sign + name: sign + type: at::Tensor + - dynamic_type: at::Tensor + field_name: logabsdet + name: logabsdet + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: smm + operator_name: smm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::smm(Tensor self, Tensor mat2) -> Tensor + arguments: 
+ - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: softmax + operator_name: softmax + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: softmax + operator_name: softmax + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _softmax + operator_name: _softmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _softmax_out + operator_name: _softmax + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _softmax_backward_data + operator_name: _softmax_backward_data + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _softmax_backward_data_out + operator_name: _softmax_backward_data + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::ScalarType, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: input_dtype + type: at::ScalarType + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsafe_split + operator_name: unsafe_split + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: split + operator_name: split + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsafe_split_with_sizes + operator_name: unsafe_split_with_sizes + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: split_sizes + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: 
false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: split_sizes + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: split_with_sizes + operator_name: split_with_sizes + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: split_sizes + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: split_sizes + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: hsplit + operator_name: hsplit + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sections + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sections + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hsplit + operator_name: hsplit + overload_name: array + manual_kernel_registration: false + category_override: '' + schema_string: aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + 
dynamic_type: at::IntArrayRef + is_nullable: false + name: indices + type: at::IntArrayRef + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: indices + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: vsplit + operator_name: vsplit + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sections + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sections + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: vsplit + operator_name: vsplit + overload_name: array + manual_kernel_registration: false + category_override: '' + schema_string: aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: indices + type: at::IntArrayRef + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: indices + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: dsplit + operator_name: dsplit + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sections + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + 
is_nullable: false + name: sections + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: dsplit + operator_name: dsplit + overload_name: array + manual_kernel_registration: false + category_override: '' + schema_string: aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: indices + type: at::IntArrayRef + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: indices + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: squeeze + operator_name: squeeze + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::squeeze(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze + operator_name: squeeze + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze + operator_name: squeeze + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self 
+ type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: squeeze_ + operator_name: squeeze_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::squeeze_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze_ + operator_name: squeeze_ + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze_ + operator_name: squeeze_ + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: sspaddmm + operator_name: sspaddmm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sspaddmm_out + operator_name: sspaddmm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: stack + operator_name: stack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::stack(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: stack_out + operator_name: stack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _stack + operator_name: _stack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_stack(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _stack_out + operator_name: _stack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hstack + operator_name: hstack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hstack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: hstack_out + operator_name: hstack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: vstack + operator_name: vstack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::vstack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: vstack_out + operator_name: vstack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: dstack + operator_name: dstack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::dstack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: dstack_out + operator_name: dstack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: stft + operator_name: stft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: window + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: return_complex + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: window + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: return_complex + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: istft + operator_name: istft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? 
length=None, bool return_complex=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: window + type: const c10::optional & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: center + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: length + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_complex + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional, c10::optional, const c10::optional &, bool, bool, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: window + type: const c10::optional & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: center + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: length + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_complex + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: stride + operator_name: stride + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::stride.int(Tensor self, int dim) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: int64_t (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + 
dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: stride + operator_name: stride + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::stride.Dimname(Tensor self, Dimname dim) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: int64_t (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: sum + operator_name: sum + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum + operator_name: sum + overload_name: dim_IntList + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum + operator_name: sum + overload_name: dim_DimnameList + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sum_out + operator_name: sum + overload_name: IntList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_out + operator_name: sum + overload_name: DimnameList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nansum + operator_name: nansum + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nansum(Tensor self, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nansum + operator_name: nansum + overload_name: dim_IntList + manual_kernel_registration: false + category_override: '' + schema_string: aten::nansum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nansum_out + operator_name: nansum + overload_name: IntList_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::nansum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_to_size + operator_name: sum_to_size + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sum_to_size(Tensor self, int[] size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: sqrt + operator_name: sqrt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sqrt(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sqrt_ + operator_name: sqrt_ + overload_name: '' + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::sqrt_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sqrt_out + operator_name: sqrt + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: square + operator_name: square + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::square(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: square_ + operator_name: square_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::square_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: square_out + operator_name: square + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std + operator_name: std + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::std(Tensor self, bool unbiased=True) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std + operator_name: std + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + 
default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std + operator_name: std + overload_name: correction + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_mean + operator_name: std_mean + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_mean + operator_name: std_mean + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + 
is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_mean + operator_name: std_mean + overload_name: correction + manual_kernel_registration: false + category_override: '' + schema_string: aten::std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_mean + operator_name: std_mean + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: 
null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_mean + operator_name: std_mean + overload_name: correction_names + manual_kernel_registration: false + category_override: '' + schema_string: aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_out + operator_name: std + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_out + operator_name: std + overload_name: correction_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std + operator_name: std + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_out + operator_name: std + overload_name: names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std + operator_name: std + overload_name: correction_names + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: std_out + operator_name: std + overload_name: correction_names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, c10::optional, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: prod + operator_name: prod + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: prod + operator_name: prod + overload_name: dim_int + manual_kernel_registration: false + category_override: '' + schema_string: aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: prod_out + operator_name: prod + overload_name: int_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: prod + operator_name: prod + overload_name: dim_Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: prod_out + operator_name: prod + overload_name: Dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: t + operator_name: t + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::t(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: t_ + operator_name: t_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::t_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: tan + operator_name: tan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tan(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tan_ + operator_name: tan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tan_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tan_out + operator_name: tan + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tanh + operator_name: tanh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tanh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tanh_ + operator_name: tanh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tanh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tanh_out + operator_name: tanh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tensordot + operator_name: tensordot + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_self + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_other + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_self + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_other + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: tensordot_out + operator_name: tensordot + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_self + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_other + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_self + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims_other + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: threshold + operator_name: threshold + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: threshold_ + operator_name: threshold_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: threshold_out + operator_name: threshold + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: threshold_backward_out + operator_name: threshold_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: threshold_backward + operator_name: threshold_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tile + operator_name: tile + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tile(Tensor self, int[] dims) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + 
python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: transpose + operator_name: transpose + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: transpose + operator_name: transpose + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim0 + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim1 + type: at::Dimname + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, at::Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim0 + type: at::Dimname + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim1 + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: _mkldnn_transpose + operator_name: _mkldnn_transpose + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + 
is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: transpose_ + operator_name: transpose_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mkldnn_transpose_ + operator_name: _mkldnn_transpose_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: one_hot + operator_name: one_hot + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::one_hot(Tensor self, int num_classes=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: num_classes + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: num_classes + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: flip + operator_name: flip + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::flip(Tensor self, int[] dims) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fliplr + operator_name: fliplr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fliplr(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: flipud + operator_name: flipud + overload_name: '' + manual_kernel_registration: false + 
category_override: '' + schema_string: aten::flipud(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: roll + operator_name: roll + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: shifts + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + size: 1 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: shifts + size: 1 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + size: 1 + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rot90 + operator_name: rot90 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: '{0,1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: '{0,1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dims + type: at::IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trapezoid + operator_name: trapezoid + overload_name: x + manual_kernel_registration: false + category_override: '' + schema_string: aten::trapezoid.x(Tensor y, 
Tensor x, *, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: trapezoid + operator_name: trapezoid + overload_name: dx + manual_kernel_registration: false + category_override: '' + schema_string: aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: dx + type: const at::Scalar & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: dx + type: const at::Scalar & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: trapz + operator_name: trapz + overload_name: x + manual_kernel_registration: false + category_override: '' + schema_string: aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - 
namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: trapz + operator_name: trapz + overload_name: dx + manual_kernel_registration: false + category_override: '' + schema_string: aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dx + type: double + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: y + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + kwarg_only: true + name: dx + type: double + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _trilinear + operator_name: _trilinear + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: i3 + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: expand2 + type: at::IntArrayRef + - annotation: null + 
dynamic_type: at::IntArrayRef + is_nullable: false + name: expand3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sumdim + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: unroll_dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triplet_margin_loss + operator_name: triplet_margin_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: anchor + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: positive + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: negative + type: const at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: 1.0e-06 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: swap + type: bool + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, double, double, bool, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: anchor + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: positive + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: negative + type: const at::Tensor & + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: 1.0e-06 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: swap + type: bool + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: trunc + operator_name: trunc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::trunc(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const 
at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trunc_ + operator_name: trunc_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::trunc_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trunc_out + operator_name: trunc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fix + operator_name: fix + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fix(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fix_ + operator_name: fix_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fix_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fix_out + operator_name: fix + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: type_as + operator_name: type_as + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::type_as(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _has_compatible_shallow_copy_type + operator_name: _has_compatible_shallow_copy_type + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: from + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: from + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + 
deprecated: false + has_math_kernel: true +- name: _unique + operator_name: _unique + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unique_dim + operator_name: unique_dim + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unique_consecutive + operator_name: unique_consecutive + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unique_consecutive(Tensor self, bool return_inverse=False, bool 
return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unique_dim_consecutive + operator_name: unique_dim_consecutive + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unique2 + operator_name: _unique2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + 
dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unsafe_view + operator_name: _unsafe_view + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_unsafe_view(Tensor self, int[] size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsqueeze + operator_name: unsqueeze + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsqueeze_ + operator_name: unsqueeze_ + overload_name: '' + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: vander + operator_name: vander + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: N + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: increasing + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: N + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: increasing + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var + operator_name: var + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::var(Tensor self, bool unbiased=True) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var + operator_name: var + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self 
+ type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var + operator_name: var + overload_name: correction + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_out + operator_name: var + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_out + operator_name: var + overload_name: correction_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var + operator_name: var + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_out + operator_name: var + overload_name: names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var + operator_name: var + overload_name: correction_names + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_out + operator_name: var + overload_name: correction_names_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::DimnameList, c10::optional, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_mean + operator_name: var_mean + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_mean + operator_name: var_mean + overload_name: dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, bool, bool) + 
schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_mean + operator_name: var_mean + overload_name: correction + manual_kernel_registration: false + category_override: '' + schema_string: aten::var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_mean + operator_name: var_mean + overload_name: names_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + 
is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: var_mean + operator_name: var_mean + overload_name: correction_names + manual_kernel_registration: false + category_override: '' + schema_string: aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::DimnameList, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::DimnameList + is_nullable: false + name: dim + size: 1 + type: at::DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: true + kwarg_only: true + name: correction + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: view_as + operator_name: view_as + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: where + operator_name: where + overload_name: self + manual_kernel_registration: false + category_override: '' + schema_string: 
aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: where + operator_name: where + overload_name: ScalarSelf + manual_kernel_registration: false + category_override: '' + schema_string: aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: where + operator_name: where + overload_name: ScalarOther + manual_kernel_registration: false + category_override: '' + schema_string: aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const 
at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: where + operator_name: where + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: where + operator_name: where + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::where(Tensor condition) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + schema_order_cpp_signature: ::std::vector (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _s_where + operator_name: _s_where + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_s_where(Tensor condition, Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: condition + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - 
dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm_except_dim + operator_name: norm_except_dim + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: pow + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: pow + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _weight_norm + operator_name: _weight_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: g + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: g + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _weight_norm_cuda_interface + operator_name: _weight_norm_cuda_interface + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: g + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: v + type: const 
at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: g + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_norm_cuda_interface_backward + operator_name: _weight_norm_cuda_interface_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_norm_cuda_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_w + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_g + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_norms + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_w + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_g + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_norms + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_norm_differentiable_backward + operator_name: _weight_norm_differentiable_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_w + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_g + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_norms + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, 
int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_w + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_v + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_g + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: saved_norms + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: zeros + operator_name: zeros + overload_name: names + manual_kernel_registration: false + category_override: '' + schema_string: aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: _efficientzerotensor + operator_name: _efficientzerotensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: zeros + operator_name: zeros + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: zeros_out + operator_name: zeros + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: zeros_like + operator_name: zeros_like + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _standard_gamma_grad + operator_name: _standard_gamma_grad + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self 
+ type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _standard_gamma + operator_name: _standard_gamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dirichlet_grad + operator_name: _dirichlet_grad + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: alpha + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: total + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: x + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: alpha + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: total + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sample_dirichlet + operator_name: _sample_dirichlet + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_sample_dirichlet(Tensor self, Generator? 
generator=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Generator
+    is_nullable: true
+    name: generator
+    type: c10::optional<at::Generator>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::Generator>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Generator
+    is_nullable: true
+    name: generator
+    type: c10::optional<at::Generator>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: poisson
+  operator_name: poisson
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::poisson(Tensor self, Generator? generator=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Generator
+    is_nullable: true
+    name: generator
+    type: c10::optional<at::Generator>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::Generator>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Generator
+    is_nullable: true
+    name: generator
+    type: c10::optional<at::Generator>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: binomial
+  operator_name: binomial
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: count
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: prob
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Generator
+    is_nullable: true
+    name: generator
+    type: c10::optional<at::Generator>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<at::Generator>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: count
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: prob
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Generator
+    is_nullable: true
+    name: generator
+    type: c10::optional<at::Generator>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: native_norm
+  operator_name: native_norm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::native_norm(Tensor self, Scalar p=2) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: p
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: p
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: native_norm
+  operator_name: native_norm
+  overload_name: ScalarOpt_dim_dtype
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, at::IntArrayRef, bool, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_sum
+  operator_name: _sparse_sum
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_sum(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_sum
+  operator_name: _sparse_sum
+  overload_name: dtype
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_sum
+  operator_name: _sparse_sum
+  overload_name: dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_sum
+  operator_name: _sparse_sum
+  overload_name: dim_dtype
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::ScalarType)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_sum_backward
+  operator_name: _sparse_sum_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_softmax
+  operator_name: _sparse_softmax
+  overload_name: int
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_softmax
+  operator_name: _sparse_softmax
+  overload_name: Dimname
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_softmax
+  operator_name: _sparse_softmax
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: half_to_float
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: half_to_float
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_softmax_backward_data
+  operator_name: _sparse_softmax_backward_data
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_log_softmax
+  operator_name: _sparse_log_softmax
+  overload_name: int
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_log_softmax
+  operator_name: _sparse_log_softmax
+  overload_name: Dimname
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_log_softmax
+  operator_name: _sparse_log_softmax
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: half_to_float
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: half_to_float
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_log_softmax_backward_data
+  operator_name: _sparse_log_softmax_backward_data
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm
+  operator_name: norm
+  overload_name: ScalarOpt_dtype
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, at::ScalarType)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm
+  operator_name: norm
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: p
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 2
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: p
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm
+  operator_name: norm
+  overload_name: ScalarOpt_dim_dtype
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, at::IntArrayRef, bool, at::ScalarType)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm
+  operator_name: norm
+  overload_name: ScalarOpt_dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, at::IntArrayRef, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm_out
+  operator_name: norm
+  overload_name: dtype_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional<at::Scalar> &, at::IntArrayRef, bool, at::ScalarType, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm_out
+  operator_name: norm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional<at::Scalar> &, at::IntArrayRef, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: norm
+  operator_name: norm
+  overload_name: names_ScalarOpt_dim_dtype
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, at::DimnameList, bool, at::ScalarType)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: norm
+  operator_name: norm
+  overload_name: names_ScalarOpt_dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional<at::Scalar> &, at::DimnameList, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: norm_out
+  operator_name: norm
+  overload_name: names_dtype_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional<at::Scalar> &, at::DimnameList, bool, at::ScalarType, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    dynamic_type: at::ScalarType
+    is_nullable: false
+    kwarg_only: true
+    name: dtype
+    type: at::ScalarType
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: norm_out
+  operator_name: norm
+  overload_name: names_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional<at::Scalar> &, at::DimnameList, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: true
+    name: p
+    type: const c10::optional<at::Scalar> &
+  - annotation: null
+    dynamic_type: at::DimnameList
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::DimnameList
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: frexp
+  operator_name: frexp
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: mantissa
+    name: mantissa
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: exponent
+    name: exponent
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: frexp_out
+  operator_name: frexp
+  overload_name: Tensor_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: mantissa
+    is_nullable: false
+    name: mantissa
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: exponent
+    is_nullable: false
+    name: exponent
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: mantissa
+    is_nullable: false
+    name: mantissa
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: exponent
+    is_nullable: false
+    name: exponent
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: mantissa
+    name: mantissa
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    field_name: exponent
+    name: exponent
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: frobenius_norm
+  operator_name: frobenius_norm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::frobenius_norm(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: frobenius_norm
+  operator_name: frobenius_norm
+  overload_name: dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: frobenius_norm_out
+  operator_name: frobenius_norm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nuclear_norm
+  operator_name: nuclear_norm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nuclear_norm_out
+  operator_name: nuclear_norm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nuclear_norm
+  operator_name: nuclear_norm
+  overload_name: dim
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nuclear_norm_out
+  operator_name: nuclear_norm
+  overload_name: dim_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: clone
+  operator_name: clone
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::MemoryFormat
+    is_nullable: true
+    kwarg_only: true
+    name: memory_format
+    type: c10::optional<at::MemoryFormat>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::MemoryFormat>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::MemoryFormat
+    is_nullable: true
+    kwarg_only: true
+    name: memory_format
+    type: c10::optional<at::MemoryFormat>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: positive
+  operator_name: positive
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::positive(Tensor(a) self) -> Tensor(a)
+  arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: a
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: resize_as_
+  operator_name: resize_as_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: the_template
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::MemoryFormat
+    is_nullable: true
+    kwarg_only: true
+    name: memory_format
+    type: c10::optional<at::MemoryFormat>
+  schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<at::MemoryFormat>)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: the_template
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::MemoryFormat
+    is_nullable: true
+    kwarg_only: true
+    name: memory_format
+    type: c10::optional<at::MemoryFormat>
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: const at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: resize_as_sparse_
+  operator_name: resize_as_sparse_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: the_template
+    type: const at::Tensor &
+  schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: the_template
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: const at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: zero_
+  operator_name: zero_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::zero_(Tensor(a!) self) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sub_out
+  operator_name: sub
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sub
+  operator_name: sub
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sub_
+  operator_name: sub_
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sub
+  operator_name: sub
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sub_
+  operator_name: sub_
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: subtract_out
+  operator_name: subtract
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: subtract
+  operator_name: subtract
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: subtract_
+  operator_name: subtract_
+  overload_name: Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: subtract
+  operator_name: subtract
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: subtract_
+  operator_name: subtract_
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: rsub + operator_name: rsub + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: heaviside_out + operator_name: heaviside + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - allocate: true + annotation: a! 
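+# Editor's note: in the sub/subtract family above, `alpha` scales `other` before
+# the subtraction (out = self - alpha * other), and rsub swaps the operands
+# (out = other - alpha * self); the subtract.* entries are math-kernel aliases of
+# sub.*. A minimal, hedged sketch of the Go shape the generator derives from
+# sub.Tensor, assuming the conventions of ts/tensor-generated.go (names not
+# verified against the generated file, so treat them as hypothetical):
+#
+#     // func (ts *Tensor) Sub(other *Tensor, del bool) (*Tensor, error)
+#     diff, err := a.Sub(b, false) // a - 1*b; `del` frees the receiver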
+- name: heaviside_out
+  operator_name: heaviside
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: heaviside
+  operator_name: heaviside
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::heaviside(Tensor self, Tensor values) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: heaviside_
+  operator_name: heaviside_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: rsub
+  operator_name: rsub
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: other
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_addmm
+  operator_name: _sparse_addmm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: sparse
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: dense
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: sparse
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: dense
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sparse_sampled_addmm_out
+  operator_name: sparse_sampled_addmm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sparse_sampled_addmm
+  operator_name: sparse_sampled_addmm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: sparse
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
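+# Editor's note: addmm computes out = beta * self + alpha * (mat1 @ mat2);
+# sparse_sampled_addmm (new in the 1.11 declarations) evaluates that product
+# only at the sparsity pattern of the CSR tensor `self`. A hedged sketch of the
+# Go shape the generator would emit for addmm, with names assumed from the
+# ts/tensor-generated.go conventions rather than verified (hypothetical):
+#
+#     // func (ts *Tensor) Addmm(mat1, mat2 *Tensor, del bool) (*Tensor, error)
+#     out, err := bias.Addmm(x, w, false) // beta = alpha = 1 by default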
+- name: addmm_out
+  operator_name: addmm
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: addmm
+  operator_name: addmm
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: addmm_
+  operator_name: addmm_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat1
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mat2
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: beta
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    kwarg_only: true
+    name: alpha
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sparse_csr_tensor
+  operator_name: sparse_csr_tensor
+  overload_name: crow_col_value_size
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: sparse_csr_tensor
+  operator_name: sparse_csr_tensor
+  overload_name: crow_col_value
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_csr_tensor_unsafe
+  operator_name: _sparse_csr_tensor_unsafe
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: sparse_coo_tensor
+  operator_name: sparse_coo_tensor
+  overload_name: size
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: sparse_coo_tensor
+  operator_name: sparse_coo_tensor
+  overload_name: indices
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
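+# Editor's note: for factory functions such as the sparse_csr_tensor and
+# sparse_coo_tensor overloads above, `arguments` keeps the packed
+# at::TensorOptions while `schema_order_arguments` expands it into four
+# optionals (dtype, layout, device, pin_memory). The generator consumes the
+# schema-order form, which is why the generated wrapper takes a kind/device
+# pair instead of an options struct. A hedged, hypothetical Go shape:
+#
+#     // func SparseCooTensor(size []int64, optionsKind gotch.DType,
+#     //     optionsDevice gotch.Device) (*Tensor, error)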
+- name: sparse_coo_tensor
+  operator_name: sparse_coo_tensor
+  overload_name: indices_size
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_coo_tensor_unsafe
+  operator_name: _sparse_coo_tensor_unsafe
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _validate_sparse_coo_tensor_args
+  operator_name: _validate_sparse_coo_tensor_args
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _validate_sparse_csr_tensor_args
+  operator_name: _validate_sparse_csr_tensor_args
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  schema_order_cpp_signature: void (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: crow_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns: []
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _sparse_coo_tensor_with_dims
+  operator_name: _sparse_coo_tensor_with_dims
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _sparse_coo_tensor_with_dims_and_tensors
+  operator_name: _sparse_coo_tensor_with_dims_and_tensors
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorOptions
+    is_nullable: false
+    kwarg_only: true
+    name: options
+    type: at::TensorOptions
+  schema_order_cpp_signature: at::Tensor (int64_t, int64_t, at::IntArrayRef, const at::Tensor &, const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: indices
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: values
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Layout
+    is_nullable: true
+    kwarg_only: true
+    name: layout
+    type: c10::optional<at::Layout>
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::Device
+    is_nullable: true
+    kwarg_only: true
+    name: device
+    type: c10::optional<at::Device>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: pin_memory
+    type: c10::optional<bool>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: true
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sparse_resize_
+  operator_name: sparse_resize_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: const at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sparse_resize_and_clear_
+  operator_name: sparse_resize_and_clear_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: sparse_dim
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dense_dim
+    type: int64_t
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: const at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sparse_mask
+  operator_name: sparse_mask
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mask
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mask
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _to_cpu
+  operator_name: _to_cpu
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_to_cpu(Tensor[] tensors) -> Tensor[]
+  arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  schema_order_cpp_signature: ::std::vector<at::Tensor> (at::TensorList)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: tensors
+    type: at::TensorList
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::TensorList
+    name: result
+    type: ::std::vector<at::Tensor>
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: to_dense
+  operator_name: to_dense
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: to_dense_backward
+  operator_name: to_dense_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: sparse_dim
+  operator_name: sparse_dim
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sparse_dim(Tensor self) -> int
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: int64_t (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: int64_t
+    name: result
+    type: int64_t
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
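+# Editor's note: sparse_dim above and the _dimI/_dimV/_nnz entries that follow
+# are scalar introspection getters (method_of Tensor, int64_t return, no device
+# guard), so they bind to plain int64-returning methods rather than
+# tensor-returning ones. Hypothetical Go shape, not verified against the
+# generated file:
+#
+#     // func (ts *Tensor) SparseDim() (int64, error)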
manual_kernel_registration: false + category_override: '' + schema_string: aten::_dimI(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: dense_dim + operator_name: dense_dim + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::dense_dim(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dimV + operator_name: _dimV + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_dimV(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnz + operator_name: _nnz + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_nnz(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: coalesce + operator_name: coalesce + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::coalesce(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native 
+ python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _coalesce + operator_name: _coalesce + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_coalesce(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_coalesced + operator_name: is_coalesced + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::is_coalesced(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _indices + operator_name: _indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_indices(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _values + operator_name: _values + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_values(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _coalesced_ + operator_name: _coalesced_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_coalesced_(Tensor(a!) 
self, bool coalesced) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, bool) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: indices + operator_name: indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::indices(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: values + operator_name: values + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::values(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: crow_indices + operator_name: crow_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::crow_indices(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: col_indices + operator_name: col_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::col_indices(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + 
schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: hspmm_out + operator_name: hspmm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hspmm + operator_name: hspmm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mat2 + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copy_sparse_to_sparse_ + operator_name: copy_sparse_to_sparse_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unbind + operator_name: unbind + overload_name: int + manual_kernel_registration: false + category_override: '' + schema_string: aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: ::std::vector (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unbind + operator_name: unbind + overload_name: Dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] + arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + schema_order_cpp_signature: ::std::vector (const at::Tensor &, at::Dimname) + schema_order_arguments: + - annotation: a -> * + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: to_sparse + operator_name: to_sparse + overload_name: sparse_dim + manual_kernel_registration: false + category_override: '' + schema_string: aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - 
dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_sparse + operator_name: to_sparse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::to_sparse(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_mkldnn + operator_name: to_mkldnn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_reorder_conv2d_weight + operator_name: mkldnn_reorder_conv2d_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: 
at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_reorder_conv3d_weight + operator_name: mkldnn_reorder_conv3d_weight + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_mkldnn_backward + operator_name: to_mkldnn_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: 
false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: quantize_per_tensor_dynamic + operator_name: quantize_per_tensor_dynamic + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + - annotation: null + dynamic_type: bool + is_nullable: false + name: reduce_range + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + - annotation: null + dynamic_type: bool + is_nullable: false + name: reduce_range + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_tensor + operator_name: quantize_per_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, int64_t, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_tensor + operator_name: quantize_per_tensor + overload_name: tensor_qparams + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & 
+ - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_tensor + operator_name: quantize_per_tensor + overload_name: tensors + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_points + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Tensor &, const at::Tensor &, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_points + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_channel + operator_name: quantize_per_channel + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_points + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor 
&, int64_t, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scales + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_points + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dequantize + operator_name: dequantize + overload_name: self + manual_kernel_registration: false + category_override: '' + schema_string: aten::dequantize.self(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dequantize + operator_name: dequantize + overload_name: tensors + manual_kernel_registration: false + category_override: '' + schema_string: aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_scale + operator_name: q_scale + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::q_scale(Tensor self) -> float + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: double (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: double + name: result + type: double + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_zero_point + operator_name: q_zero_point + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::q_zero_point(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor 
+ is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_per_channel_scales + operator_name: q_per_channel_scales + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::q_per_channel_scales(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_per_channel_zero_points + operator_name: q_per_channel_zero_points + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::q_per_channel_zero_points(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_per_channel_axis + operator_name: q_per_channel_axis + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::q_per_channel_axis(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: int64_t (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: int_repr + operator_name: int_repr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::int_repr(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + 
method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_per_tensor_quantized_tensor + operator_name: _make_per_tensor_quantized_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_per_channel_quantized_tensor + operator_name: _make_per_channel_quantized_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: qscheme + operator_name: qscheme + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::qscheme(Tensor self) -> QScheme + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::QScheme (const at::Tensor 
&) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::QScheme + name: result + type: at::QScheme + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_tensor_affine + operator_name: fake_quantize_per_tensor_affine + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fake_quantize_per_tensor_affine + operator_name: fake_quantize_per_tensor_affine + overload_name: tensor_qparams + manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null 
+ dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fake_quantize_per_tensor_affine_cachemask + operator_name: fake_quantize_per_tensor_affine_cachemask + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, double, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: mask + name: mask + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams + operator_name: _fake_quantize_per_tensor_affine_cachemask_tensor_qparams + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: fake_quant_enabled + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor 
&, const at::Tensor &, const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: fake_quant_enabled + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: mask + name: mask + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_tensor_affine_cachemask_backward + operator_name: fake_quantize_per_tensor_affine_cachemask_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _fake_quantize_learnable_per_tensor_affine + operator_name: _fake_quantize_learnable_per_tensor_affine + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + 
name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_learnable_per_tensor_affine_backward + operator_name: _fake_quantize_learnable_per_tensor_affine_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fake_quantize_per_channel_affine + operator_name: fake_quantize_per_channel_affine + overload_name: '' + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fake_quantize_per_channel_affine_cachemask + operator_name: fake_quantize_per_channel_affine_cachemask + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + 
dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: mask + name: mask + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_channel_affine_cachemask_backward + operator_name: fake_quantize_per_channel_affine_cachemask_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _fake_quantize_learnable_per_channel_affine + operator_name: _fake_quantize_learnable_per_channel_affine + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + 
type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_learnable_per_channel_affine_backward + operator_name: _fake_quantize_learnable_per_channel_affine_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: grad_factor + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fused_moving_avg_obs_fake_quant + operator_name: fused_moving_avg_obs_fake_quant + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor 
fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: observer_on + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: fake_quant_on + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_min + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_max + type: at::Tensor & + - annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: at::Tensor & + - annotation: d! + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: averaging_const + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ch_axis + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: per_row_fake_quant + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric_quant + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: observer_on + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: fake_quant_on + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_min + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_max + type: at::Tensor & + - annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: at::Tensor & + - annotation: d! 
+ dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: averaging_const + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ch_axis + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: per_row_fake_quant + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric_quant + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _fused_moving_avg_obs_fq_helper + operator_name: _fused_moving_avg_obs_fq_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: observer_on + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: fake_quant_on + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_min + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_max + type: at::Tensor & + - annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: at::Tensor & + - annotation: d! 
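`fused_moving_avg_obs_fake_quant` above mutates `running_min`/`running_max` in place (the `a!`/`b!` annotations) using `averaging_const`. Reading the schema, that is an exponential moving average of each batch's observed min/max, gated by `observer_on`; a sketch of just that update, under that assumption:

```go
package main

import "fmt"

// emaUpdate sketches the running min/max update implied by the in-place
// running_min/running_max arguments: an exponential moving average with
// weight averagingConst. This is my reading of the schema, not the kernel.
func emaUpdate(running, observed, averagingConst float64) float64 {
	return running + averagingConst*(observed-running)
}

func main() {
	runningMin, runningMax := -1.0, 1.0
	// Suppose a new batch was observed with min -2.0 and max 3.0.
	runningMin = emaUpdate(runningMin, -2.0, 0.01)
	runningMax = emaUpdate(runningMax, 3.0, 0.01)
	fmt.Println(runningMin, runningMax) // -1.01 1.02
}
```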
+ dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: averaging_const + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ch_axis + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: per_row_fake_quant + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric_quant + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: observer_on + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: fake_quant_on + type: const at::Tensor & + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: running_min + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: running_max + type: at::Tensor & + - annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: scale + type: at::Tensor & + - annotation: d! + dynamic_type: at::Tensor + is_nullable: false + name: zero_point + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: averaging_const + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ch_axis + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: per_row_fake_quant + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric_quant + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: mask + name: mask + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _choose_qparams_per_tensor + operator_name: _choose_qparams_per_tensor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: reduce_range + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: 
reduce_range + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: double + name: result0 + type: double + - dynamic_type: int64_t + name: result1 + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _saturate_weight_to_fp16 + operator_name: _saturate_weight_to_fp16 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: choose_qparams_optimized + operator_name: choose_qparams_optimized + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: numel + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_bins + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: ratio + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: bit_width + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: numel + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_bins + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: ratio + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: bit_width + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _autocast_to_reduced_precision + operator_name: _autocast_to_reduced_precision + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: cuda_enabled + type: bool + - annotation: null 
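`_choose_qparams_per_tensor` above returns a `(float, int)` pair, i.e. a scale and zero point for the observed tensor. Here is a sketch of the textbook asymmetric derivation such a pair usually comes from (stretch `[min, max]` over `[qmin, qmax]`), simplifying away `reduce_range` (which shrinks the usable quantized range) and the histogram search that `choose_qparams_optimized` layers on top; the names and edge-case handling are mine:

```go
package main

import (
	"fmt"
	"math"
)

// chooseQParams sketches the usual asymmetric (scale, zero_point)
// derivation: map the observed [min, max] onto the integer range
// [qmin, qmax]. Real kernels handle more edge cases than this.
func chooseQParams(min, max float64, qmin, qmax int64) (float64, int64) {
	// The representable range must include 0 so that zero is exact.
	min = math.Min(min, 0)
	max = math.Max(max, 0)
	scale := (max - min) / float64(qmax-qmin)
	if scale == 0 {
		scale = 1
	}
	zeroPoint := qmin - int64(math.Round(min/scale))
	if zeroPoint < qmin {
		zeroPoint = qmin
	}
	if zeroPoint > qmax {
		zeroPoint = qmax
	}
	return scale, zeroPoint
}

func main() {
	scale, zp := chooseQParams(-1.0, 3.0, 0, 255)
	fmt.Printf("scale=%.6f zero_point=%d\n", scale, zp)
}
```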
+ dynamic_type: bool + is_nullable: false + name: cpu_enabled + type: bool + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: cuda_dtype + type: at::ScalarType + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: cpu_dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool, bool, at::ScalarType, at::ScalarType) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: cuda_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cpu_enabled + type: bool + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: cuda_dtype + type: at::ScalarType + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: cpu_dtype + type: at::ScalarType + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: _autocast_to_full_precision + operator_name: _autocast_to_full_precision + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: cuda_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cpu_enabled + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: cuda_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cpu_enabled + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: _to_copy + operator_name: _to_copy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: non_blocking + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: non_blocking + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: to + operator_name: to + overload_name: dtype_layout + manual_kernel_registration: false + category_override: '' + schema_string: aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, bool, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: to + operator_name: to + overload_name: device + manual_kernel_registration: false + category_override: '' + schema_string: aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Device + is_nullable: false + name: device + type: at::Device + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Device, at::ScalarType, bool, bool, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Device + is_nullable: false + name: device + type: at::Device + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: to + operator_name: to + overload_name: dtype + manual_kernel_registration: false + category_override: '' + schema_string: aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType, bool, bool, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: to + operator_name: to + overload_name: other + manual_kernel_registration: false + category_override: '' + schema_string: aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: meshgrid + operator_name: meshgrid + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::meshgrid(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: meshgrid + operator_name: meshgrid + overload_name: indexing + manual_kernel_registration: false + category_override: '' + schema_string: aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: indexing + type: c10::string_view + schema_order_cpp_signature: ::std::vector (at::TensorList, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: indexing + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + 
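`_to_copy` and the four `aten::to` overloads above show the generator's two views of one signature: the public `arguments` list packs dtype/layout/device/pin_memory into a single `at::TensorOptions`, while `schema_order_arguments` spells them out as `c10::optional` values. An unset optional means "keep what `self` already has"; a sketch of that resolution rule, with a Go pointer standing in for `c10::optional`:

```go
package main

import "fmt"

// resolve applies to()'s convention for optional dtype/layout/device
// arguments: nil (c10::nullopt) keeps the source tensor's property,
// anything else overrides it.
func resolve(current string, requested *string) string {
	if requested == nil {
		return current
	}
	return *requested
}

func main() {
	f32 := "float32"
	fmt.Println(resolve("float64", &f32)) // float32: cast requested
	fmt.Println(resolve("float64", nil))  // float64: keep source dtype
}
```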
has_math_kernel: true +- name: cartesian_prod + operator_name: cartesian_prod + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cartesian_prod(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: combinations + operator_name: combinations + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: r + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: with_replacement + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: r + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: with_replacement + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: item + operator_name: item + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::item(Tensor self) -> Scalar + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Scalar (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: const at::Scalar & + name: result + type: at::Scalar + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: result_type + operator_name: result_type + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::ScalarType (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + 
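`combinations` above defaults to `r=2, with_replacement=False`. On a plain Go slice instead of a 1-D tensor, that default is just index-ordered pairs of elements (the general `r` case is omitted for brevity):

```go
package main

import "fmt"

// combinations2 mirrors aten::combinations with its schema defaults
// (r=2): all index-ordered pairs, optionally allowing repeats.
func combinations2(xs []float64, withReplacement bool) [][2]float64 {
	var out [][2]float64
	for i := 0; i < len(xs); i++ {
		start := i + 1
		if withReplacement {
			start = i
		}
		for j := start; j < len(xs); j++ {
			out = append(out, [2]float64{xs[i], xs[j]})
		}
	}
	return out
}

func main() {
	fmt.Println(combinations2([]float64{1, 2, 3}, false)) // [[1 2] [1 3] [2 3]]
	fmt.Println(combinations2([]float64{1, 2, 3}, true))  // adds [1 1] [2 2] [3 3]
}
```

The `result_type`, `can_cast` and `promote_types` declarations around this point all consult ATen's type-promotion table. Below is a deliberately tiny stand-in for that table, just enough to show what the two queries compute; the real lattice also covers bool, half, bfloat16 and complex, and is not a simple total order:

```go
package main

import "fmt"

// A toy promotion order over four dtypes, standing in for ATen's table.
var rank = map[string]int{"uint8": 0, "int64": 1, "float32": 2, "float64": 3}

// promoteTypes picks the higher of the two dtypes, as promote_types does.
func promoteTypes(a, b string) string {
	if rank[a] >= rank[b] {
		return a
	}
	return b
}

// canCast is true when the cast never loses the "kind", mimicking the
// spirit of aten::can_cast (e.g. float -> int is rejected).
func canCast(from, to string) bool {
	return rank[from] <= rank[to]
}

func main() {
	fmt.Println(promoteTypes("uint8", "float32")) // float32
	fmt.Println(canCast("float64", "int64"))      // false
}
```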
name: tensor + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::ScalarType + name: result + type: at::ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: result_type + operator_name: result_type + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::ScalarType (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::ScalarType + name: result + type: at::ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: result_type + operator_name: result_type + overload_name: Scalar_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + schema_order_cpp_signature: at::ScalarType (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::ScalarType + name: result + type: at::ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: result_type + operator_name: result_type + overload_name: Scalar_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar1 + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar2 + type: const at::Scalar & + schema_order_cpp_signature: at::ScalarType (const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar1 + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar2 + type: const at::Scalar & + method_of: + - Type + - namespace + mode: 
native + python_module: '' + returns: + - dynamic_type: at::ScalarType + name: result + type: at::ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: can_cast + operator_name: can_cast + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::can_cast(ScalarType from, ScalarType to) -> bool + arguments: + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: from + type: at::ScalarType + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: to + type: at::ScalarType + schema_order_cpp_signature: bool (at::ScalarType, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: from + type: at::ScalarType + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: to + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: promote_types + operator_name: promote_types + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType + arguments: + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: type1 + type: at::ScalarType + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: type2 + type: at::ScalarType + schema_order_cpp_signature: at::ScalarType (at::ScalarType, at::ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: type1 + type: at::ScalarType + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: type2 + type: at::ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::ScalarType + name: result + type: at::ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _local_scalar_dense + operator_name: _local_scalar_dense + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_local_scalar_dense(Tensor self) -> Scalar + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Scalar (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: const at::Scalar & + name: result + type: at::Scalar + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_lstm_cell + operator_name: _thnn_fused_lstm_cell + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? 
hidden_bias=None) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cx + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cx + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_lstm_cell_backward + operator_name: _thnn_fused_lstm_cell_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? 
grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: workspace + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: workspace + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_differentiable_lstm_cell_backward + operator_name: _thnn_differentiable_lstm_cell_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? 
hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cy + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_hy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: grad_cy + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: cy + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _thnn_fused_gru_cell + operator_name: _thnn_fused_gru_cell + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? 
hidden_bias=None) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_gru_cell_backward + operator_name: _thnn_fused_gru_cell_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_hy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: workspace + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_hy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: workspace + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_differentiable_gru_cell_backward + operator_name: _thnn_differentiable_gru_cell_backward + overload_name: '' + 
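`_thnn_fused_lstm_cell` above takes `input_gates` and `hidden_gates` plus optional biases and returns three tensors. My reading, from the names and the usual LSTM formulation: the two gate tensors are the pre-activation projections (x·W_ihᵀ and h·W_hhᵀ) laid out as the four gates i, f, g, o, and the kernel fuses the bias add, the nonlinearities and the state update, returning `(hy, cy, workspace)`. A scalar sketch of that recurrence, with the two biases folded into one for brevity:

```go
package main

import (
	"fmt"
	"math"
)

func sigmoid(x float64) float64 { return 1 / (1 + math.Exp(-x)) }

// lstmCell sketches the recurrence that _thnn_fused_lstm_cell fuses,
// under the gate-layout assumption stated above. One hidden unit shown;
// the workspace output (cached activations for backward) is omitted.
func lstmCell(inputGates, hiddenGates, bias [4]float64, cx float64) (hy, cy float64) {
	var g [4]float64
	for k := 0; k < 4; k++ {
		g[k] = inputGates[k] + hiddenGates[k] + bias[k]
	}
	i, f := sigmoid(g[0]), sigmoid(g[1])
	cand, o := math.Tanh(g[2]), sigmoid(g[3])
	cy = f*cx + i*cand
	hy = o * math.Tanh(cy)
	return hy, cy
}

func main() {
	hy, cy := lstmCell(
		[4]float64{0.1, 0.2, 0.3, 0.4},
		[4]float64{0, 0, 0, 0},
		[4]float64{0, 0, 0, 0},
		0.5,
	)
	fmt.Printf("hy=%.4f cy=%.4f\n", hy, cy)
}
```

The fused GRU cell declarations that follow play the same role for the GRU recurrence, with `workspace` again caching what the `_backward` variants consume.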
manual_kernel_registration: false + category_override: '' + schema_string: aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_hy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_hy + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hidden_gates + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: hx + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: input_bias + type: const c10::optional & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + - dynamic_type: at::Tensor + name: result2 + type: at::Tensor + - dynamic_type: at::Tensor + name: result3 + type: at::Tensor + - dynamic_type: at::Tensor + name: result4 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: lstm + operator_name: lstm + overload_name: input + manual_kernel_registration: false + category_override: '' + schema_string: aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: hx + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: params + type: at::TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - 
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result2
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: lstm
+  operator_name: lstm
+  overload_name: data
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result2
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: gru
+  operator_name: gru
+  overload_name: input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: gru
+  operator_name: gru
+  overload_name: data
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: rnn_tanh
+  operator_name: rnn_tanh
+  overload_name: input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: rnn_tanh
+  operator_name: rnn_tanh
+  overload_name: data
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: rnn_relu
+  operator_name: rnn_relu
+  overload_name: input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: rnn_relu
+  operator_name: rnn_relu
+  overload_name: data
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::TensorList, bool, int64_t, double, bool, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: params
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: has_biases
+    type: bool
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: num_layers
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: dropout
+    type: double
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: train
+    type: bool
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: bidirectional
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: lstm_cell
+  operator_name: lstm_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: gru_cell
+  operator_name: gru_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: rnn_tanh_cell
+  operator_name: rnn_tanh_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: rnn_relu_cell
+  operator_name: rnn_relu_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_ih
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: b_hh
+    type: const c10::optional<at::Tensor> &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantized_lstm_cell
+  operator_name: quantized_lstm_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::TensorList
+    is_nullable: false
+    name: hx
+    type: at::TensorList
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantized_gru_cell
+  operator_name: quantized_gru_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantized_rnn_relu_cell
+  operator_name: quantized_rnn_relu_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantized_rnn_tanh_cell
+  operator_name: quantized_rnn_tanh_cell
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: hx
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: w_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: b_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: packed_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_ih
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: col_offsets_hh
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale_hh
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_ih
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: zero_point_hh
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _pack_padded_sequence
+  operator_name: _pack_padded_sequence
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: lengths
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: lengths
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _pack_padded_sequence_backward
+  operator_name: _pack_padded_sequence_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: input_size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, const at::Tensor &, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: input_size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _pad_packed_sequence
+  operator_name: _pad_packed_sequence
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: padding_value
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: total_length
+    type: int64_t
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool, const at::Scalar &, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: data
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: batch_sizes
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: batch_first
+    type: bool
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: padding_value
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: total_length
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result0
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    name: result1
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: set_
+  operator_name: set_
+  overload_name: source_Storage
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Storage
+    is_nullable: false
+    name: source
+    type: at::Storage
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Storage)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Storage
+    is_nullable: false
+    name: source
+    type: at::Storage
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: set_
+  operator_name: set_
+  overload_name: source_Storage_storage_offset
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Storage
+    is_nullable: false
+    name: source
+    type: at::Storage
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: storage_offset
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Storage, int64_t, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Storage
+    is_nullable: false
+    name: source
+    type: at::Storage
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: storage_offset
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: size
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: set_
+  operator_name: set_
+  overload_name: source_Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: source
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: source
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: set_
+  operator_name: set_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::set_(Tensor(a!) self) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: is_set_to
+  operator_name: is_set_to
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::is_set_to(Tensor self, Tensor tensor) -> bool
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: tensor
+    type: const at::Tensor &
+  schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: tensor
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: bool
+    name: result
+    type: bool
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: false
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: masked_fill_
+  operator_name: masked_fill_
+  overload_name: Scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: mask
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: value
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill + operator_name: masked_fill + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill_ + operator_name: masked_fill_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
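+# masked_fill writes a scalar into every position where the boolean mask is true;
+# the underscore variant mutates self, the plain one returns a new tensor. Sketch
+# against the Scalar schemas above (libtorch C++, assumes #include <torch/torch.h>):
+#   torch::Tensor t    = torch::zeros({2, 3});
+#   torch::Tensor mask = t.le(0.0);                       // boolean mask, same shape
+#   t.masked_fill_(mask, 1.5);                            // in place
+#   torch::Tensor r = torch::masked_fill(t, mask, -1.0);  // functional variant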
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill + operator_name: masked_fill + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_scatter_ + operator_name: masked_scatter_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
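+# The Tensor-valued masked_fill overloads behave the same except that `value` must
+# be a 0-dim tensor. Sketch (libtorch C++, illustrative):
+#   torch::Tensor t    = torch::zeros({2, 3});
+#   torch::Tensor mask = torch::ones({2, 3}).to(torch::kBool);
+#   t.masked_fill_(mask, torch::scalar_tensor(7.0));   // in place, 0-dim value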
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_scatter + operator_name: masked_scatter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _masked_softmax + operator_name: _masked_softmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_masked_softmax(Tensor self, Tensor mask) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view + operator_name: view + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::view(Tensor(a) self, int[] size) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: 
false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: view + operator_name: view + overload_name: dtype + manual_kernel_registration: false + category_override: '' + schema_string: aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::ScalarType) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::ScalarType + is_nullable: false + name: dtype + type: at::ScalarType + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: put_ + operator_name: put_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: a! 
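+# aten::view reinterprets the same storage with new sizes (the element count must
+# match); the dtype overload reinterprets the raw bytes as another scalar type of
+# compatible width. Sketch (libtorch C++, assumes #include <torch/torch.h>):
+#   torch::Tensor t = torch::arange(6);        // int64 tensor, shape [6]
+#   torch::Tensor m = t.view({2, 3});          // same storage, shape [2, 3]
+#   torch::Tensor f = torch::zeros({4});
+#   torch::Tensor i = f.view(torch::kInt);     // float32 bytes viewed as int32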
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: put + operator_name: put + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_add_out + operator_name: index_add + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
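+# put_/put treat self as if it were 1-D: values from `source` are written at the
+# flat positions in `index`, and accumulate=true adds instead of overwriting.
+# Sketch (libtorch C++, illustrative values):
+#   torch::Tensor t   = torch::zeros({2, 3});
+#   torch::Tensor idx = torch::tensor({0, 5});
+#   torch::Tensor src = torch::tensor({1.0, 2.0});
+#   t.put_(idx, src, /*accumulate=*/false);              // flat slots 0 and 5 set
+#   torch::Tensor r = torch::put(t, idx, src, /*accumulate=*/true);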
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_add_ + operator_name: index_add_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
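+# index_add.out is the out= form of index_add: rows of `source`, scaled by `alpha`,
+# are added into `self` at the positions in `index` along `dim`, with the result
+# written into a caller-provided `out`. Sketch, assuming the usual out-first ATen
+# C++ convention (#include <torch/torch.h>):
+#   torch::Tensor self = torch::zeros({3, 2});
+#   torch::Tensor idx  = torch::tensor({0, 2});
+#   torch::Tensor src  = torch::ones({2, 2});
+#   torch::Tensor out  = torch::empty_like(self);
+#   at::index_add_out(out, self, 0, idx, src);   // rows 0 and 2 receive +1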
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_add + operator_name: index_add + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_add + operator_name: index_add + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + 
schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_fill_ + operator_name: index_fill_ + overload_name: int_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
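+# index_add_/index_add accumulate alpha * source into self along `dim` at `index`
+# (duplicate indices sum their contributions); the dimname overload addresses the
+# dimension by name instead of position. Sketch (libtorch C++):
+#   torch::Tensor t   = torch::zeros({3, 2});
+#   torch::Tensor idx = torch::tensor({0, 0, 1});
+#   torch::Tensor src = torch::ones({3, 2});
+#   t.index_add_(0, idx, src, /*alpha=*/2.0);   // row 0 += 4, row 1 += 2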
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill + operator_name: index_fill + overload_name: int_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill_ + operator_name: index_fill_ + overload_name: int_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
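+# index_fill_/index_fill write a single value into whole slices selected by `index`
+# along `dim`. Sketch (libtorch C++, illustrative):
+#   torch::Tensor t   = torch::tensor({1});    // hmm, see below for the real setup
+#   t = torch::zeros({3, 2});
+#   torch::Tensor idx = torch::tensor({1});
+#   t.index_fill_(0, idx, 9.0);                               // row 1 becomes all 9s
+#   torch::Tensor r = torch::index_fill(t, 1, idx, 5.0);      // column 1, new tensor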
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill + operator_name: index_fill + overload_name: int_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill_ + operator_name: index_fill_ + overload_name: Dimname_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_fill_ + operator_name: index_fill_ + overload_name: Dimname_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_fill + operator_name: index_fill + overload_name: Dimname_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - 
Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_fill + operator_name: index_fill + overload_name: Dimname_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: value + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: scatter + operator_name: scatter + overload_name: src + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: src + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_.src(Tensor(a!) 
self, int dim, Tensor index, Tensor src) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_out + operator_name: scatter + overload_name: src_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: value + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: value + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
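+# scatter writes src (or a scalar value) into self at positions named by index
+# along dim; for dim=0 the rule is out[idx[i][j]][j] = src[i][j]. Sketch of the
+# src and value overloads above (libtorch C++, assumes #include <torch/torch.h>):
+#   torch::Tensor self = torch::zeros({3, 3});
+#   torch::Tensor idx  = torch::tensor({{0, 1, 2}});
+#   torch::Tensor src  = torch::ones({1, 3});
+#   torch::Tensor a = torch::scatter(self, 0, idx, src);   // tensor source
+#   torch::Tensor b = torch::scatter(self, 0, idx, 4.2);   // scalar value
+#   self.scatter_(0, idx, src);                             // in-place form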
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_out + operator_name: scatter + overload_name: value_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: reduce + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: reduce + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: a! 
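+# The reduce overloads combine scattered values with what is already in self
+# instead of overwriting; `reduce` is "add" or "multiply". Sketch (libtorch C++):
+#   torch::Tensor self = torch::ones({3, 3});
+#   torch::Tensor idx  = torch::tensor({{0, 1, 2}});
+#   torch::Tensor src  = torch::full({1, 3}, 2.0);
+#   self.scatter_(0, idx, src, "add");        // targeted cells become 1 + 2 = 3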
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_out + operator_name: scatter + overload_name: reduce_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: value_reduce + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: value_reduce + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, c10::string_view) + schema_order_arguments: + - annotation: a! 
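+# value_reduce applies the same reduction with a scalar instead of a src tensor.
+# Sketch (libtorch C++, illustrative):
+#   torch::Tensor self = torch::ones({3, 3});
+#   torch::Tensor idx  = torch::tensor({{0, 1, 2}});
+#   self.scatter_(0, idx, 2.0, "multiply");   // targeted cells become 1 * 2 = 2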
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_out + operator_name: scatter + overload_name: value_reduce_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, c10::string_view, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + kwarg_only: true + name: reduce + type: c10::string_view + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: dimname_src + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: scatter + operator_name: scatter + overload_name: dimname_value + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: scatter_add + operator_name: 
scatter_add + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_add_ + operator_name: scatter_add_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_add_out + operator_name: scatter_add + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_add + operator_name: scatter_add + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: src + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: eq_ + operator_name: eq_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq_ + operator_name: eq_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_out + operator_name: bitwise_and + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_out + operator_name: bitwise_and + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and + operator_name: bitwise_and + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and + operator_name: bitwise_and + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_ + operator_name: bitwise_and_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_and_ + operator_name: bitwise_and_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __and__ + operator_name: __and__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __and__ + operator_name: __and__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: 
self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __iand__ + operator_name: __iand__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __iand__ + operator_name: __iand__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_or_out + operator_name: bitwise_or + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or_out + operator_name: bitwise_or + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or + operator_name: bitwise_or + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_or + operator_name: bitwise_or + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + 
dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or_ + operator_name: bitwise_or_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_or_ + operator_name: bitwise_or_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __or__ + operator_name: __or__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __or__ + operator_name: __or__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __ior__ + operator_name: __ior__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __ior__ + operator_name: __ior__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_xor_out + operator_name: bitwise_xor + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor_out + operator_name: bitwise_xor + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor + operator_name: bitwise_xor + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_xor + operator_name: bitwise_xor + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor_ + operator_name: bitwise_xor_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: bitwise_xor_ + operator_name: bitwise_xor_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __xor__ + operator_name: __xor__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __xor__ + operator_name: __xor__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: 
self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __ixor__ + operator_name: __ixor__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __ixor__ + operator_name: __ixor__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: __lshift__ + operator_name: __lshift__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __lshift__ + operator_name: __lshift__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ilshift__ + operator_name: __ilshift__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ilshift__ + operator_name: __ilshift__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift + operator_name: bitwise_left_shift + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift_ + operator_name: bitwise_left_shift_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift_out + operator_name: bitwise_left_shift + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift + operator_name: bitwise_left_shift + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift_ + operator_name: bitwise_left_shift_ + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift_out + operator_name: bitwise_left_shift + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_left_shift + operator_name: bitwise_left_shift + overload_name: Scalar_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __rshift__ + operator_name: __rshift__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __rshift__ + operator_name: __rshift__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + 
abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __irshift__ + operator_name: __irshift__ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __irshift__ + operator_name: __irshift__ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift + operator_name: bitwise_right_shift + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift_ + operator_name: bitwise_right_shift_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+ arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift_out + operator_name: bitwise_right_shift + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift + operator_name: bitwise_right_shift + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift_ + operator_name: bitwise_right_shift_ + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift_out + operator_name: bitwise_right_shift + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_right_shift + operator_name: bitwise_right_shift + overload_name: Scalar_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril_ + operator_name: tril_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triu_ + operator_name: triu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: digamma_ + operator_name: digamma_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::digamma_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_ + operator_name: lerp_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_ + operator_name: lerp_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addbmm_ + operator_name: addbmm_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addbmm_out + operator_name: addbmm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addbmm + operator_name: addbmm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: batch2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: beta + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const 
at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: random_ + operator_name: random_ + overload_name: from + manual_kernel_registration: false + category_override: '' + schema_string: aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: from + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: to + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: from + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: to + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: random_ + operator_name: random_ + overload_name: to + manual_kernel_registration: false + category_override: '' + schema_string: aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: to + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: to + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: random_ + operator_name: random_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: uniform_ + operator_name: uniform_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: from + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: to + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: from + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: to + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cauchy_ + operator_name: cauchy_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: median + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: sigma + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: median + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: sigma + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_normal_ + operator_name: log_normal_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exponential_ + operator_name: exponential_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: lambd + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: lambd + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: geometric_ + operator_name: geometric_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag_out + operator_name: diag + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag + operator_name: diag + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diag(Tensor self, int diagonal=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag_backward + operator_name: diag_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::diag_backward(Tensor grad, int[] input_sizes, int diagonal) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: cross_out + operator_name: cross + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: cross + operator_name: cross + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: triu_out + operator_name: triu + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triu + operator_name: triu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::triu(Tensor self, int diagonal=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril_out + operator_name: tril + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril + operator_name: tril + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tril(Tensor self, int diagonal=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril_indices + operator_name: tril_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triu_indices + 
operator_name: triu_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trace + operator_name: trace + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::trace(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trace_backward + operator_name: trace_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::trace_backward(Tensor grad, int[] sizes) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + 
dynamic_type: at::IntArrayRef + is_nullable: false + name: sizes + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: ne_out + operator_name: ne + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne + operator_name: ne + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::ne.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_out + operator_name: ne + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne + operator_name: ne + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::ne.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_ + operator_name: ne_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_ + operator_name: ne_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal_out + operator_name: not_equal + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: not_equal + operator_name: not_equal + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: not_equal_out + operator_name: not_equal + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: not_equal + operator_name: not_equal + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: not_equal_ + operator_name: not_equal_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: not_equal_ + operator_name: not_equal_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: eq_out + operator_name: eq + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq + operator_name: eq + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::eq.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq_out + operator_name: eq + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq + operator_name: eq + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::eq.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_out + operator_name: ge + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge + operator_name: ge + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::ge.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_out + operator_name: ge + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge + operator_name: ge + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::ge.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_ + operator_name: ge_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_ + operator_name: ge_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: greater_equal_out + operator_name: greater_equal + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_equal + operator_name: greater_equal + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_equal_out + operator_name: greater_equal + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_equal + operator_name: greater_equal + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_equal_ + operator_name: greater_equal_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_equal_ + operator_name: greater_equal_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: le_out + operator_name: le + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: le + operator_name: le + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::le.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: le_out + operator_name: le + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: le + operator_name: le + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::le.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: le_ + operator_name: le_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: le_ + operator_name: le_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: less_equal_out + operator_name: less_equal + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_equal + operator_name: less_equal + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_equal_out + operator_name: less_equal + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_equal + operator_name: less_equal + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_equal_ + operator_name: less_equal_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_equal_ + operator_name: less_equal_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gt_out + operator_name: gt + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gt + operator_name: gt + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::gt.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gt_out + operator_name: gt + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gt + operator_name: gt + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::gt.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gt_ + operator_name: gt_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gt_ + operator_name: gt_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: greater_out + operator_name: greater + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater + operator_name: greater + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_out + operator_name: greater + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater + operator_name: greater + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_ + operator_name: greater_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: greater_ + operator_name: greater_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: lt_out + operator_name: lt + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lt + operator_name: lt + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::lt.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lt_out + operator_name: lt + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lt + operator_name: lt + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::lt.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lt_ + operator_name: lt_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lt_ + operator_name: lt_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: less_out + operator_name: less + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less + operator_name: less + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::less.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_out + operator_name: less + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less + operator_name: less + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::less.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_ + operator_name: less_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: less_ + operator_name: less_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: take_out + operator_name: take + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: take + operator_name: take + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::take(Tensor self, Tensor index) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: take_along_dim_out + operator_name: take_along_dim + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
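+# `is_nullable: true` together with `default: c10::nullopt` (the `dim` argument
+# below) corresponds to `int?` in the schema string and to c10::optional<int64_t>
+# in the C++ signature.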
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: dim
+ type: c10::optional<int64_t>
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: dim
+ type: c10::optional<int64_t>
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: take_along_dim
+ operator_name: take_along_dim
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: dim
+ type: c10::optional<int64_t>
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: dim
+ type: c10::optional<int64_t>
+ method_of:
+ - Type
+ - Tensor
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: index_select_out
+ operator_name: index_select
+ overload_name: out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_select + operator_name: index_select + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_select(Tensor self, int dim, Tensor index) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_select_out + operator_name: index_select + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_select + operator_name: index_select + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: index_select_backward + operator_name: index_select_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::index_select_backward(Tensor grad, int[] self_sizes, int dim, Tensor index) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: self_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - 
annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: self_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: masked_select_out + operator_name: masked_select + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_select + operator_name: masked_select + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_select(Tensor self, Tensor mask) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mask + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_select_backward + operator_name: masked_select_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: 
at::Tensor
+ is_nullable: false
+ name: mask
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: input
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: mask
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: false
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: nonzero_out
+ operator_name: nonzero
+ overload_name: out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: nonzero
+ operator_name: nonzero
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::nonzero(Tensor self) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - Tensor
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: nonzero_numpy
+ operator_name: nonzero_numpy
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::nonzero_numpy(Tensor self) -> Tensor[]
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - Tensor
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::TensorList
+ name: result
+ type: ::std::vector<at::Tensor>
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: argwhere
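+# aten::argwhere mirrors numpy.argwhere: like nonzero it returns one row of
+# indices per nonzero element, and it is composite (`has_math_kernel: true`,
+# `abstract: false` below) rather than backed by a dedicated kernel.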
+ operator_name: argwhere + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::argwhere(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gather_out + operator_name: gather + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + - allocate: true + annotation: a! 
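+# `kwarg_only: true` (the `sparse_grad` flag here) marks arguments that sit after
+# the `*` in the schema string: keyword-only in Python, ordinary positional
+# parameters in the C++ signature.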
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gather + operator_name: gather + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gather_backward + operator_name: gather_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse_grad + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse_grad + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + 
is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: gather_out + operator_name: gather + overload_name: dimname_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: gather + operator_name: gather + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: 
false + deprecated: false + has_math_kernel: true +- name: _gather_sparse_backward + operator_name: _gather_sparse_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: addcmul_out + operator_name: addcmul + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcmul + operator_name: addcmul + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcmul_ + operator_name: addcmul_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcdiv_out + operator_name: addcdiv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcdiv + operator_name: addcdiv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcdiv_ + operator_name: addcdiv_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor1 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tensor2 + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: value + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: tensor1
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: tensor2
+ type: const at::Tensor &
+ - annotation: null
+ default: 1
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ kwarg_only: true
+ name: value
+ type: const at::Scalar &
+ method_of:
+ - Type
+ - Tensor
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: self
+ type: at::Tensor &
+ inplace: true
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: cross_entropy_loss
+ operator_name: cross_entropy_loss
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: target
+ type: const at::Tensor &
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: at::Reduction::Mean
+ dynamic_type: int64_t
+ is_nullable: false
+ name: reduction
+ type: int64_t
+ - annotation: null
+ default: -100
+ dynamic_type: int64_t
+ is_nullable: false
+ name: ignore_index
+ type: int64_t
+ - annotation: null
+ default: 0.0
+ dynamic_type: double
+ is_nullable: false
+ name: label_smoothing
+ type: double
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, double)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: target
+ type: const at::Tensor &
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: at::Reduction::Mean
+ dynamic_type: int64_t
+ is_nullable: false
+ name: reduction
+ type: int64_t
+ - annotation: null
+ default: -100
+ dynamic_type: int64_t
+ is_nullable: false
+ name: ignore_index
+ type: int64_t
+ - annotation: null
+ default: 0.0
+ dynamic_type: double
+ is_nullable: false
+ name: label_smoothing
+ type: double
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: lstsq_out
+ operator_name: lstsq
+ overload_name: X
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: solution
+ is_nullable: false
+ name: X
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
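+# Multi-output `*_out` variants attach a `field_name` to each output tensor
+# (solution/QR for lstsq); these name the members of the returned ::std::tuple.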
+ dynamic_type: at::Tensor
+ field_name: QR
+ is_nullable: false
+ name: qr
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: solution
+ is_nullable: false
+ name: X
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ field_name: QR
+ is_nullable: false
+ name: qr
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: solution
+ name: X
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ field_name: QR
+ name: qr
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: lstsq
+ operator_name: lstsq
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - Tensor
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: solution
+ name: solution
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ field_name: QR
+ name: QR
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: triangular_solve_out
+ operator_name: triangular_solve
+ overload_name: X
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: solution
+ is_nullable: false
+ name: X
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ field_name: cloned_coefficient
+ is_nullable: false
+ name: M
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: upper
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: transpose
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: unitriangular
+ type: bool
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, bool, bool, bool, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: upper
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: transpose
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: unitriangular
+ type: bool
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: solution
+ is_nullable: false
+ name: X
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ field_name: cloned_coefficient
+ is_nullable: false
+ name: M
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: solution
+ name: X
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ field_name: cloned_coefficient
+ name: M
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: triangular_solve
+ operator_name: triangular_solve
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: upper
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: transpose
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: unitriangular
+ type: bool
+ schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool, bool, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: A
+ type: const at::Tensor &
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: upper
+ type: bool
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: transpose
+ type: bool
+ - annotation: null
+
default: false + dynamic_type: bool + is_nullable: false + name: unitriangular + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: solution + name: solution + type: at::Tensor + - dynamic_type: at::Tensor + field_name: cloned_coefficient + name: cloned_coefficient + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _linalg_check_errors + operator_name: _linalg_check_errors + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> () + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: info + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: api_name + type: c10::string_view + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_matrix + type: bool + schema_order_cpp_signature: void (const at::Tensor &, c10::string_view, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: info + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: api_name + type: c10::string_view + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: is_matrix + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_solve_triangular_out + operator_name: linalg_solve_triangular + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: unitriangular + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: unitriangular + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_solve_triangular + operator_name: linalg_solve_triangular + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: unitriangular + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: B + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: unitriangular + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: 
result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: symeig_out + operator_name: symeig + overload_name: e + manual_kernel_registration: false + category_override: '' + schema_string: aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: V + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: V + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: e + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: eigenvectors + name: V + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: symeig + operator_name: symeig + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: eigenvalues + type: at::Tensor + - dynamic_type: at::Tensor + field_name: 
eigenvectors + name: eigenvectors_return + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _symeig_helper + operator_name: _symeig_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eig_out + operator_name: eig + overload_name: e + manual_kernel_registration: false + category_override: '' + schema_string: aten::eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: v + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: v + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: e + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: eigenvectors + name: v + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eig + operator_name: eig + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: eigenvalues + type: at::Tensor + - dynamic_type: at::Tensor + field_name: eigenvectors + name: eigenvectors_return + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: svd_out + operator_name: svd + overload_name: U + manual_kernel_registration: false + category_override: '' + schema_string: aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: S + is_nullable: false + name: S + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: V + is_nullable: false + name: V + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: S + is_nullable: false + name: S + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: V + is_nullable: false + name: V + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: S + name: S + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: V + name: V + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: svd + operator_name: svd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor + - dynamic_type: at::Tensor + field_name: S + name: S + type: at::Tensor + - dynamic_type: at::Tensor + field_name: V + name: V + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: swapaxes + operator_name: swapaxes + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis1 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis1 + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: swapaxes_ + operator_name: swapaxes_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: 
aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis1 + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis1 + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: swapdims + operator_name: swapdims + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: swapdims_ + operator_name: swapdims_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim1 + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: cholesky_out + operator_name: cholesky + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky + operator_name: cholesky + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cholesky(Tensor self, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_solve_out + operator_name: cholesky_solve + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_solve + operator_name: cholesky_solve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cholesky_solve_helper + operator_name: _cholesky_solve_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + dynamic_type: bool + 
is_nullable: false + name: upper + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: solve + operator_name: solve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: solution + name: solution + type: at::Tensor + - dynamic_type: at::Tensor + field_name: LU + name: LU + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: solve_out + operator_name: solve + overload_name: solution + manual_kernel_registration: false + category_override: '' + schema_string: aten::solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: solution + is_nullable: false + name: solution + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: LU + is_nullable: false + name: lu + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: solution + is_nullable: false + name: solution + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: LU + is_nullable: false + name: lu + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: solution + name: solution + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: LU + name: lu + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _solve_helper + operator_name: _solve_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_inverse + operator_name: cholesky_inverse + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_inverse_out + operator_name: cholesky_inverse + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: qr_out + operator_name: qr + overload_name: Q + manual_kernel_registration: false + category_override: '' + schema_string: aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: Q + is_nullable: false + name: Q + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: R + is_nullable: false + name: R + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: Q + is_nullable: false + name: Q + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: R + is_nullable: false + name: R + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: Q + name: Q + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: R + name: R + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: qr + operator_name: qr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: Q + name: Q + type: at::Tensor + - dynamic_type: at::Tensor + field_name: R + name: R + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: geqrf_out + operator_name: geqrf + overload_name: a + manual_kernel_registration: false + category_override: '' + schema_string: aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: a + is_nullable: false + name: a + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: tau + is_nullable: false + name: tau + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: a + is_nullable: false + name: a + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: tau + is_nullable: false + name: tau + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: a + name: a + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: tau + name: tau + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: geqrf + operator_name: geqrf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: a + name: a + type: at::Tensor + - dynamic_type: at::Tensor + field_name: tau + name: tau + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: orgqr + operator_name: orgqr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::orgqr(Tensor self, Tensor input2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: orgqr_out + operator_name: orgqr + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ormqr_out + operator_name: ormqr + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input3 + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input3 + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ormqr + operator_name: ormqr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input3 + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input2 + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input3 + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _lu_with_info + operator_name: _lu_with_info + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: pivot + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: pivot + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: check_errors + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: LU + name: LU + type: at::Tensor + - dynamic_type: at::Tensor + field_name: pivots + name: pivots + type: at::Tensor + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor + inplace: false + 
is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: lu_solve_out + operator_name: lu_solve + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lu_solve + operator_name: lu_solve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lu_unpack + operator_name: lu_unpack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: 
LU_pivots + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_data + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_pivots + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_data + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_pivots + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: P + name: P + type: at::Tensor + - dynamic_type: at::Tensor + field_name: L + name: L + type: at::Tensor + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lu_unpack_out + operator_name: lu_unpack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: P + is_nullable: false + name: P + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: L + is_nullable: false + name: L + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_data + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_pivots + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_data + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: LU_pivots + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_data + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unpack_pivots + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: P + is_nullable: false + name: P + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: L + is_nullable: false + name: L + output: true + type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: P + name: P + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: L + name: L + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multinomial_out + operator_name: multinomial + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional<at::Generator> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, c10::optional<at::Generator>, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional<at::Generator> + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multinomial + operator_name: multinomial + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator?
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lgamma_out + operator_name: lgamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lgamma_ + operator_name: lgamma_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::lgamma_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lgamma + operator_name: lgamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::lgamma(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: digamma_out + operator_name: digamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: digamma + operator_name: digamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::digamma(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polygamma_out + operator_name: polygamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (int64_t, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polygamma + operator_name: polygamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::polygamma(int n, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polygamma_ + operator_name: polygamma_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfinv + operator_name: erfinv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::erfinv(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfinv_ + operator_name: erfinv_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::erfinv_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfinv_out + operator_name: erfinv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: i0 + operator_name: i0 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::i0(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: i0_ + operator_name: i0_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::i0_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: i0_out + operator_name: i0 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sign + operator_name: sign + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sign(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sign_ + operator_name: sign_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sign_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sign_out + operator_name: sign + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: signbit + operator_name: signbit + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::signbit(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: signbit_out + operator_name: signbit + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dist + operator_name: dist + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan2_out + operator_name: atan2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan2_ + operator_name: atan2_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan2 + operator_name: atan2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::atan2(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctan2 + operator_name: arctan2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctan2(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arctan2_out + operator_name: arctan2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: arctan2_ + operator_name: arctan2_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: lerp_out + operator_name: lerp + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_out + operator_name: lerp + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp + operator_name: lerp + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: weight + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp + operator_name: lerp + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor + arguments: + - annotation: null + 
dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: end + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: histc_out + operator_name: histc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: histc + operator_name: histc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: min + type: const at::Scalar & + - annotation: null + default: 0 + dynamic_type: const at::Scalar & + is_nullable: false + name: max + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: histogram_out + operator_name: histogram + overload_name: bins_tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: hist + is_nullable: false + name: hist + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor
+ field_name: bin_edges
+ is_nullable: false
+ name: bin_edges
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: bins
+ type: const at::Tensor &
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, bool, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: bins
+ type: const at::Tensor &
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: hist
+ is_nullable: false
+ name: hist
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ field_name: bin_edges
+ is_nullable: false
+ name: bin_edges
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: hist
+ name: hist
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ field_name: bin_edges
+ name: bin_edges
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: histogram
+ operator_name: histogram
+ overload_name: bins_tensor
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: bins
+ type: const at::Tensor &
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: bins
+ type: const at::Tensor &
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ method_of:
+ - Type
+ - Tensor
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: hist
+ name: hist
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ field_name: bin_edges
+ name: bin_edges
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: histogram_out
+ operator_name: histogram
+ overload_name: bin_ct_out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: hist
+ is_nullable: false
+ name: hist
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ field_name: bin_edges
+ is_nullable: false
+ name: bin_edges
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ default: 100
+ dynamic_type: int64_t
+ is_nullable: false
+ name: bins
+ type: int64_t
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, c10::optional<at::ArrayRef<double>>, const c10::optional<at::Tensor> &, bool, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ default: 100
+ dynamic_type: int64_t
+ is_nullable: false
+ name: bins
+ type: int64_t
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ field_name: hist
+ is_nullable: false
+ name: hist
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ field_name: bin_edges
+ is_nullable: false
+ name: bin_edges
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: hist
+ name: hist
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ field_name: bin_edges
+ name: bin_edges
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: histogram
+ operator_name: histogram
+ overload_name: bin_ct
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ default: 100
+ dynamic_type: int64_t
+ is_nullable: false
+ name: bins
+ type: int64_t
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, c10::optional<at::ArrayRef<double>>, const c10::optional<at::Tensor> &, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ default: 100
+ dynamic_type: int64_t
+ is_nullable: false
+ name: bins
+ type: int64_t
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ method_of:
+ - Type
+ - Tensor
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: hist
+ name: hist
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ field_name: bin_edges
+ name: bin_edges
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _histogramdd_bin_edges
+ operator_name: _histogramdd_bin_edges
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: bins
+ type: at::IntArrayRef
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::IntArrayRef, c10::optional<at::ArrayRef<double>>, const c10::optional<at::Tensor> &, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: bins
+ type: at::IntArrayRef
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::TensorList
+ name: result
+ type: ::std::vector<at::Tensor>
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _histogramdd_from_bin_cts
+ operator_name: _histogramdd_from_bin_cts
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: bins
+ type: at::IntArrayRef
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional<at::ArrayRef<double>>, const c10::optional<at::Tensor> &, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: bins
+ type: at::IntArrayRef
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ArrayRef<double>
+ is_nullable: true
+ kwarg_only: true
+ name: range
+ type: c10::optional<at::ArrayRef<double>>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _histogramdd_from_bin_tensors
+ operator_name: _histogramdd_from_bin_tensors
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: bins
+ type: at::TensorList
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::TensorList, const c10::optional<at::Tensor> &, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::TensorList
+ is_nullable: false
+ name: bins
+ type: at::TensorList
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::Tensor
+ is_nullable: true
+ kwarg_only: true
+ name: weight
+ type: const c10::optional<at::Tensor> &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ kwarg_only: true
+ name: density
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: fmod_out
+ operator_name: fmod
+ overload_name: Scalar_out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: other
+ type: const at::Scalar &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: other
+ type: const at::Scalar &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod + operator_name: fmod + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod_ + operator_name: fmod_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod_out + operator_name: fmod + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod + operator_name: fmod + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod_ + operator_name: fmod_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hypot_out + operator_name: hypot + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hypot + operator_name: hypot + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hypot(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hypot_ + operator_name: hypot_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: igamma_out + operator_name: igamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: igamma + operator_name: igamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::igamma(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: igamma_ + operator_name: igamma_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: igammac_out + operator_name: igammac + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: igammac + operator_name: igammac + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::igammac(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: igammac_ + operator_name: igammac_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nextafter_out + operator_name: nextafter + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nextafter + operator_name: nextafter + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nextafter(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nextafter_ + operator_name: nextafter_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_out + operator_name: remainder + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder + operator_name: remainder + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_ + operator_name: remainder_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_out + operator_name: remainder + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder + operator_name: remainder + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_ + operator_name: remainder_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: remainder
+  operator_name: remainder
+  overload_name: Scalar_Tensor
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: self
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: self
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: min
+  operator_name: min
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::min(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: fmin
+  operator_name: fmin
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::fmin(Tensor self, Tensor other) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: fmin_out
+  operator_name: fmin
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: max
+  operator_name: max
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::max(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: fmax
+  operator_name: fmax
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::fmax(Tensor self, Tensor other) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: fmax_out
+  operator_name: fmax
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: maximum + operator_name: maximum + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::maximum(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: maximum_out + operator_name: maximum + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max + operator_name: max + overload_name: other + manual_kernel_registration: false + category_override: '' + schema_string: aten::max.other(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: max_out + operator_name: max + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: minimum + operator_name: minimum + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::minimum(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: minimum_out + operator_name: minimum + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min_out + operator_name: min + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: min
+  operator_name: min
+  overload_name: other
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::min.other(Tensor self, Tensor other) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantile
+  operator_name: quantile
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, bool, c10::string_view)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantile_out
+  operator_name: quantile
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, bool, c10::string_view, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantile
+  operator_name: quantile
+  overload_name: scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional<int64_t>, bool, c10::string_view)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: quantile_out
+  operator_name: quantile
+  overload_name: scalar_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional<int64_t>, bool, c10::string_view, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nanquantile
+  operator_name: nanquantile
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, bool, c10::string_view)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nanquantile_out
+  operator_name: nanquantile
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, bool, c10::string_view, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: q
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nanquantile
+  operator_name: nanquantile
+  overload_name: scalar
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional<int64_t>, bool, c10::string_view)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nanquantile_out
+  operator_name: nanquantile
+  overload_name: scalar_out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional<int64_t>, bool, c10::string_view, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: q
+    type: double
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: int64_t
+    is_nullable: true
+    name: dim
+    type: c10::optional<int64_t>
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: keepdim
+    type: bool
+  - annotation: null
+    default: '"linear"'
+    dynamic_type: c10::string_view
+    is_nullable: false
+    kwarg_only: true
+    name: interpolation
+    type: c10::string_view
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: sort_out
+  operator_name: sort
+  overload_name: values
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: descending
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: descending
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: values
+    name: values
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    field_name: indices
+    name: indices
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sort_out
+  operator_name: sort
+  overload_name: values_stable
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: stable
+    type: c10::optional<bool>
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: descending
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, c10::optional<bool>, int64_t, bool, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: stable
+    type: c10::optional<bool>
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: descending
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: values
+    name: values
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    field_name: indices
+    name: indices
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sort
+  operator_name: sort
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: descending
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: descending
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: values
+    name: values
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: indices
+    name: indices
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sort
+  operator_name: sort
+  overload_name: stable
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: stable
+    type: c10::optional<bool>
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: descending
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, c10::optional<bool>, int64_t, bool)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: true
+    kwarg_only: true
+    name: stable
+    type: c10::optional<bool>
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    kwarg_only: true
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    kwarg_only: true
+    name: descending
+    type: bool
+  method_of:
+  - Type
+  - Tensor
+  - namespace
+  mode: native
+  python_module: ''
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: values
+    name: values
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: indices
+    name: indices
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: sort_out
+  operator_name: sort
+  overload_name: dimname_values
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    field_name: indices
+    is_nullable: false
+    name: indices
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: descending
+    type: bool
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Dimname
+    is_nullable: false
+    name: dim
+    type: at::Dimname
+  - annotation: null
+    default: false
+    dynamic_type: bool
+    is_nullable: false
+    name: descending
+    type: bool
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    field_name: values
+    is_nullable: false
+    name: values
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sort_out + operator_name: sort + overload_name: dimname_values_stable + manual_kernel_registration: false + category_override: '' + schema_string: aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: stable + type: c10::optional + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: descending + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, at::Dimname, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: stable + type: c10::optional + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: descending + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sort + operator_name: sort + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: sort + operator_name: sort + overload_name: dimname_stable + manual_kernel_registration: false + category_override: '' + schema_string: aten::sort.dimname_stable(Tensor self, *, bool? 
stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: stable + type: c10::optional + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: descending + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::optional, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: stable + type: c10::optional + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + kwarg_only: true + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: msort_out + operator_name: msort + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: msort + operator_name: msort + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::msort(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: argsort + operator_name: argsort + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: argsort + operator_name: argsort + overload_name: dimname + manual_kernel_registration: false + category_override: '' + schema_string: aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Dimname + is_nullable: false + name: dim + type: at::Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - 
dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: topk_out + operator_name: topk + overload_name: values + manual_kernel_registration: false + category_override: '' + schema_string: aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, bool, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: values + is_nullable: false + name: values + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: topk + operator_name: topk + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: values + name: values + type: at::Tensor + - dynamic_type: at::Tensor + field_name: indices + name: indices + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all + operator_name: all + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::all(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all_out + operator_name: all + overload_name: all_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::any(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any_out + operator_name: any + overload_name: all_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: renorm_out + operator_name: renorm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: maxnorm + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, int64_t, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: maxnorm + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: renorm + operator_name: renorm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: maxnorm + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, int64_t, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: maxnorm + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: renorm_ + operator_name: renorm_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: maxnorm + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, int64_t, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: maxnorm + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unfold + operator_name: unfold + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: unfold_backward + operator_name: unfold_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_in + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, 
int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_in + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_sizes + type: at::IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: equal + operator_name: equal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::equal(Tensor self, Tensor other) -> bool + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: bool (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_out + operator_name: pow + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow + operator_name: pow + overload_name: Tensor_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_out + operator_name: pow + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow + operator_name: pow + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_out + operator_name: pow + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow + operator_name: pow + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_ + operator_name: pow_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_ + operator_name: pow_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: float_power_out + operator_name: float_power + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power + operator_name: float_power + overload_name: Tensor_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power_out + operator_name: float_power + overload_name: Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power + operator_name: float_power + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power_out + operator_name: float_power + overload_name: Tensor_Scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power + operator_name: float_power + overload_name: Tensor_Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power_ + operator_name: float_power_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: exponent + type: const at::Scalar & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: float_power_ + operator_name: float_power_ + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: exponent + type: const at::Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: normal_ + operator_name: normal_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (at::Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: Tensor_float_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal + operator_name: normal + overload_name: Tensor_float + manual_kernel_registration: false + category_override: '' + schema_string: aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: float_Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: std + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (double, const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: std + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal + operator_name: normal + overload_name: float_Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: std + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor (double, const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: std + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: Tensor_Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: std + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: mean + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: std + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: normal
+ operator_name: normal
+ overload_name: Tensor_Tensor
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: mean
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: std
+ type: const at::Tensor &
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ kwarg_only: true
+ name: generator
+ type: c10::optional<at::Generator>
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<at::Generator>)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: mean
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: std
+ type: const at::Tensor &
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ kwarg_only: true
+ name: generator
+ type: c10::optional<at::Generator>
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: normal
+ operator_name: normal
+ overload_name: float_float
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: double
+ is_nullable: false
+ name: mean
+ type: double
+ - annotation: null
+ dynamic_type: double
+ is_nullable: false
+ name: std
+ type: double
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: size
+ type: at::IntArrayRef
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ kwarg_only: true
+ name: generator
+ type: c10::optional<at::Generator>
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::TensorOptions
+ is_nullable: false
+ kwarg_only: true
+ name: options
+ type: at::TensorOptions
+ schema_order_cpp_signature: at::Tensor (double, double, at::IntArrayRef, c10::optional<at::Generator>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: double
+ is_nullable: false
+ name: mean
+ type: double
+ - annotation: null
+ dynamic_type: double
+ is_nullable: false
+ name: std
+ type: double
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: size
+ type: at::IntArrayRef
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ kwarg_only: true
+ name: generator
+ type: c10::optional<at::Generator>
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::ScalarType
+ is_nullable: true
+ kwarg_only: true
+ name: dtype
+ type: c10::optional<at::ScalarType>
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Layout
+ is_nullable: true
+ kwarg_only: true
+ name: layout
+ type: c10::optional<at::Layout>
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Device
+ is_nullable: true
+ kwarg_only: true
+ name: device
+ type: c10::optional<at::Device>
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: bool
+ is_nullable: true
+ kwarg_only: true
+ name: pin_memory
+ type: c10::optional<bool>
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: ''
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: true
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: normal_out
+ operator_name: normal
+ overload_name: float_float_out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: at::Tensor & (double, double, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: size + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: at::Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: alias + operator_name: alias + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::alias(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _index_copy_ + operator_name: _index_copy_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: index + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: source + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _amp_foreach_non_finite_check_and_unscale_ + operator_name: _amp_foreach_non_finite_check_and_unscale_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: found_inf + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: inv_scale + type: const at::Tensor & + schema_order_cpp_signature: void (at::TensorList, at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: found_inf + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: inv_scale + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _amp_update_scale_ + operator_name: _amp_update_scale_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: growth_tracker + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: found_inf + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_growth_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_backoff_factor + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: growth_interval + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Tensor &, const at::Tensor &, double, double, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: growth_tracker + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: found_inf + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_growth_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_backoff_factor + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: growth_interval + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cat + operator_name: _cat + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cat(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (at::TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cat_out + operator_name: _cat + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (at::TensorList, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add + operator_name: _foreach_add + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_add.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sub.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_mul.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div + operator_name: _foreach_div + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_div.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: scalar + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add + operator_name: _foreach_add + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_add.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sub.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + kwarg_only: true + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_mul.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div + operator_name: _foreach_div + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_div.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: other + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add + operator_name: _foreach_add + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_add.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sub.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div + operator_name: _foreach_div + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_div.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_mul.ScalarList(Tensor[] tensors, Scalar[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_exp + operator_name: _foreach_exp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_exp(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_zero_ + operator_name: _foreach_zero_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_zero_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_exp_ + operator_name: _foreach_exp_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_exp_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt + operator_name: _foreach_sqrt + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sqrt(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt_ + operator_name: _foreach_sqrt_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_abs + operator_name: _foreach_abs + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_abs(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_abs_ + operator_name: _foreach_abs_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_abs_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_acos + operator_name: _foreach_acos + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_acos(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_acos_ + operator_name: _foreach_acos_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_acos_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_asin + operator_name: _foreach_asin + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_asin(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_asin_ + operator_name: _foreach_asin_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_asin_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_atan + operator_name: _foreach_atan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_atan(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_atan_ + operator_name: _foreach_atan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_atan_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_ceil + operator_name: _foreach_ceil + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_ceil(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_ceil_ + operator_name: _foreach_ceil_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_ceil_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_cos + operator_name: _foreach_cos + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_cos(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_cos_ + operator_name: _foreach_cos_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_cos_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_cosh + operator_name: _foreach_cosh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_cosh(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_cosh_ + operator_name: _foreach_cosh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_cosh_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_erf + operator_name: _foreach_erf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_erf(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_erf_ + operator_name: _foreach_erf_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_erf_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_erfc + operator_name: _foreach_erfc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_erfc(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_erfc_ + operator_name: _foreach_erfc_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_erfc_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_expm1 + operator_name: _foreach_expm1 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_expm1(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_expm1_ + operator_name: _foreach_expm1_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_expm1_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_floor + operator_name: _foreach_floor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_floor(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_floor_ + operator_name: _foreach_floor_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_floor_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log + operator_name: _foreach_log + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log_ + operator_name: _foreach_log_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log10 + operator_name: _foreach_log10 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log10(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log10_ + operator_name: _foreach_log10_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log10_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log1p + operator_name: _foreach_log1p + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log1p(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log1p_ + operator_name: _foreach_log1p_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log1p_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log2 + operator_name: _foreach_log2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log2(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_log2_ + operator_name: _foreach_log2_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_log2_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_neg + operator_name: _foreach_neg + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_neg(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_neg_ + operator_name: _foreach_neg_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_neg_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tan + operator_name: _foreach_tan + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tan(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tan_ + operator_name: _foreach_tan_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tan_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh + operator_name: _foreach_tanh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_tanh_ + operator_name: _foreach_tanh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_tanh_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sin + operator_name: _foreach_sin + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sin(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sin_ + operator_name: _foreach_sin_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sin_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sinh + operator_name: _foreach_sinh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sinh(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sinh_ + operator_name: _foreach_sinh_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sinh_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_round + operator_name: _foreach_round + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_round(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_round_ + operator_name: _foreach_round_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_round_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_lgamma + operator_name: _foreach_lgamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_lgamma(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_lgamma_ + operator_name: _foreach_lgamma_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_lgamma_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_frac + operator_name: _foreach_frac + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_frac(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_frac_ + operator_name: _foreach_frac_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_frac_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_reciprocal + operator_name: _foreach_reciprocal + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_reciprocal(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_reciprocal_ + operator_name: _foreach_reciprocal_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_reciprocal_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sigmoid + operator_name: _foreach_sigmoid + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sigmoid(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sigmoid_ + operator_name: _foreach_sigmoid_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_sigmoid_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_trunc + operator_name: _foreach_trunc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_trunc(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_trunc_ + operator_name: _foreach_trunc_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_trunc_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + schema_order_cpp_signature: void (at::TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcdiv_ + operator_name: _foreach_addcdiv_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcmul_ + operator_name: _foreach_addcmul_ + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcdiv_ + operator_name: _foreach_addcdiv_ + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcmul_ + operator_name: _foreach_addcmul_ + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: void (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: a! + dynamic_type: at::TensorList + is_nullable: false + name: self + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcdiv + operator_name: _foreach_addcdiv + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcdiv.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - 
namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcmul + operator_name: _foreach_addcmul + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcmul.Scalar(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: value + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcdiv + operator_name: _foreach_addcdiv + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcdiv.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcmul 
+ operator_name: _foreach_addcmul + overload_name: ScalarList + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_addcmul.ScalarList(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList, at::TensorList, at::ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: input + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensor2 + type: at::TensorList + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: false + name: scalars + type: at::ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_maximum + operator_name: _foreach_maximum + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_maximum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_minimum + operator_name: _foreach_minimum + overload_name: List + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_minimum.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + schema_order_cpp_signature: ::std::vector (at::TensorList, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors1 + type: at::TensorList + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors2 + type: at::TensorList + method_of: + - Type + - namespace + mode: native + 
python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_norm + operator_name: _foreach_norm + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::_foreach_norm.Scalar(Tensor[] tensors, Scalar ord=2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + schema_order_cpp_signature: ::std::vector (at::TensorList, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize_out + operator_name: bucketize + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: boundaries + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted + operator_name: searchsorted + overload_name: Tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _torch_cuda_cu_linker_symbol_op + operator_name: _torch_cuda_cu_linker_symbol_op + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted_out + operator_name: searchsorted + overload_name: Tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, c10::optional, const c10::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const c10::optional & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted + operator_name: searchsorted + overload_name: Scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool, c10::optional, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: sorted_sequence + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: side + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: sorter + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convert_indices_from_coo_to_csr + operator_name: _convert_indices_from_coo_to_csr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convert_indices_from_coo_to_csr_out + operator_name: _convert_indices_from_coo_to_csr + overload_name: out + 
manual_kernel_registration: false + category_override: '' + schema_string: aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convert_indices_from_csr_to_coo + operator_name: _convert_indices_from_csr_to_coo + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: crow_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_indices + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: transpose + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: crow_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_indices + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: transpose + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convert_indices_from_csr_to_coo_out + operator_name: _convert_indices_from_csr_to_coo + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor 
col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: crow_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_indices + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: transpose + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: crow_indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: col_indices + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: transpose + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss_out + operator_name: mse_loss + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss + operator_name: mse_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss_backward_out + operator_name: mse_loss_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss_backward + operator_name: mse_loss_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss_out + operator_name: l1_loss + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss + operator_name: l1_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss_backward_out + operator_name: l1_loss_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss_backward + operator_name: l1_loss_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss_out + operator_name: multi_margin_loss + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss + operator_name: multi_margin_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? 
weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss_backward_out + operator_name: multi_margin_loss_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss_backward + operator_name: multi_margin_loss_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? 
weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const c10::optional &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: p + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: margin + type: const at::Scalar & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_out + operator_name: multilabel_margin_loss + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: multilabel_margin_loss + operator_name: multilabel_margin_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: multilabel_margin_loss_forward_out + operator_name: multilabel_margin_loss_forward + overload_name: output + manual_kernel_registration: false + category_override: '' + schema_string: aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: is_target + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: is_target + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: output + type: at::Tensor & + - dynamic_type: at::Tensor + name: is_target + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_forward + operator_name: multilabel_margin_loss_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: is_target + name: is_target + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_backward_out + operator_name: multilabel_margin_loss_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: is_target + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: is_target + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_backward + operator_name: multilabel_margin_loss_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: is_target + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: is_target + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: 
false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_out + operator_name: nll_loss + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nll_loss_nd + operator_name: nll_loss_nd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss_nd(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nll_loss + operator_name: nll_loss + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: nll_loss_forward_out + operator_name: nll_loss_forward + overload_name: output + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: total_weight + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: total_weight + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: output + type: at::Tensor & + - dynamic_type: at::Tensor + name: total_weight + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_forward + operator_name: nll_loss_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss_forward(Tensor self, Tensor target, Tensor? 
weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + field_name: output + name: output + type: at::Tensor + - dynamic_type: at::Tensor + field_name: total_weight + name: total_weight + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_backward_out + operator_name: nll_loss_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: total_weight + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, int64_t, int64_t, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: target + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: weight + type: const c10::optional & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: total_weight + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_backward + operator_name: nll_loss_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nll_loss2d_out
+  operator_name: nll_loss2d
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: -100
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: -100
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: nll_loss2d
+  operator_name: nll_loss2d
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: -100
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: -100
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
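The nll_loss2d declarations above are exactly what gen.ml consumes to emit the Go wrappers in ts/tensor-generated.go: the nullable `Tensor? weight` becomes an optional tensor on the Go side, and the schema defaults (`reduction=Mean`, `ignore_index=-100`) surface as explicit parameters. A minimal usage sketch, assuming the generator's usual conventions (a `MustNllLoss2d` method with a trailing `del` flag; names and signature are assumptions, not verified against the generated file):

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// Log-probabilities for 1 sample, 3 classes, on a 4x4 spatial grid.
	logProbs := ts.MustRandn([]int64{1, 3, 4, 4}, gotch.Float, gotch.CPU)
	target := ts.MustZeros([]int64{1, 4, 4}, gotch.Int64, gotch.CPU)

	// ts.NewTensor() stands in for `Tensor? weight=None`;
	// 1 = at::Reduction::Mean; -100 is the schema's default ignore_index.
	loss := logProbs.MustNllLoss2d(target, ts.NewTensor(), 1, -100, false)
	fmt.Println(loss.Float64Values())
}
```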
+- name: nll_loss2d_forward_out
+  operator_name: nll_loss2d_forward
+  overload_name: output
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: output
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    name: total_weight
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nll_loss2d_forward
+  operator_name: nll_loss2d_forward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: output
+    name: output
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: total_weight
+    name: total_weight
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nll_loss2d_backward_out
+  operator_name: nll_loss2d_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: nll_loss2d_backward
+  operator_name: nll_loss2d_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: weight
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: ignore_index
+    type: int64_t
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: total_weight
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
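Note that the nll_loss2d_backward entries (and the other *_backward declarations below) are consumed by libtorch's autograd machinery; Go code would normally reach them indirectly through a backward pass rather than by calling the generated wrappers directly. A rough sketch, assuming gotch's usual MustSetRequiresGrad/MustBackward/MustGrad helpers exist with these shapes:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustRandn([]int64{2, 3}, gotch.Float, gotch.CPU).MustSetRequiresGrad(true, false)
	loss := x.MustSum(gotch.Float, false) // stand-in for a loss such as nll_loss2d
	loss.MustBackward()                   // dispatches the generated *_backward kernels
	fmt.Println(x.MustGrad(false).Float64Values())
}
```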
+- name: smooth_l1_loss_out
+  operator_name: smooth_l1_loss
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: smooth_l1_loss
+  operator_name: smooth_l1_loss
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: smooth_l1_loss_backward_out
+  operator_name: smooth_l1_loss_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: smooth_l1_loss_backward
+  operator_name: smooth_l1_loss_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: beta
+    type: double
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
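For reference, the per-element semantics encoded by the smooth_l1_loss schema (with its `float beta=1.0` default) are easy to state; a small Go transliteration, independent of the generated bindings:

```go
package ref

import "math"

// smoothL1 mirrors aten::smooth_l1_loss for one element:
// 0.5*d*d/beta when |d| < beta, |d| - 0.5*beta otherwise.
func smoothL1(pred, target, beta float64) float64 {
	d := math.Abs(pred - target)
	if d < beta {
		return 0.5 * d * d / beta
	}
	return d - 0.5*beta
}
```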
+- name: huber_loss_out
+  operator_name: huber_loss
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: huber_loss
+  operator_name: huber_loss
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    default: 1.0
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: huber_loss_backward_out
+  operator_name: huber_loss_backward
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: huber_loss_backward
+  operator_name: huber_loss_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, double)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - annotation: null
+    dynamic_type: double
+    is_nullable: false
+    name: delta
+    type: double
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
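huber_loss is the unnormalized cousin of smooth_l1_loss: for the same threshold, huber_loss(delta) equals delta * smooth_l1_loss(beta=delta), which is why the two families sit side by side here. As a Go reference:

```go
package ref

import "math"

// huber mirrors aten::huber_loss for one element:
// 0.5*d*d when |d| < delta, delta*(|d| - 0.5*delta) otherwise.
func huber(pred, target, delta float64) float64 {
	d := math.Abs(pred - target)
	if d < delta {
		return 0.5 * d * d
	}
	return delta * (d - 0.5*delta)
}
```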
+- name: soft_margin_loss_out
+  operator_name: soft_margin_loss
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: soft_margin_loss
+  operator_name: soft_margin_loss
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    default: at::Reduction::Mean
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: soft_margin_loss_backward_out
+  operator_name: soft_margin_loss_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: soft_margin_loss_backward
+  operator_name: soft_margin_loss_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: target
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: reduction
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
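soft_margin_loss expects ±1 targets; per element it computes log(1 + exp(-y*x)) before the reduction is applied. A Go reference:

```go
package ref

import "math"

// softMargin mirrors aten::soft_margin_loss for one element; y is -1 or +1.
func softMargin(x, y float64) float64 {
	return math.Log1p(math.Exp(-y * x))
}
```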
+- name: elu_out
+  operator_name: elu
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: elu
+  operator_name: elu
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: elu_backward_out
+  operator_name: elu_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: is_result
+    type: bool
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self_or_result
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, bool, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: is_result
+    type: bool
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self_or_result
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: elu_backward
+  operator_name: elu_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: is_result
+    type: bool
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self_or_result
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &, bool, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: is_result
+    type: bool
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self_or_result
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: elu_
+  operator_name: elu_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: alpha
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: scale
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: input_scale
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
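Two details of the elu block are worth noting: the three `Scalar` arguments (alpha, scale, input_scale) all default to 1, and elu_backward takes an `is_result`/`self_or_result` pair so the gradient can be recomputed from either the saved input or the saved output, which is what lets the in-place elu_ stay differentiable. The forward math, as a Go reference:

```go
package ref

import "math"

// elu mirrors aten::elu: scale*x for x > 0,
// scale*alpha*(exp(input_scale*x) - 1) otherwise.
func elu(x, alpha, scale, inputScale float64) float64 {
	if x > 0 {
		return scale * x
	}
	return scale * alpha * math.Expm1(inputScale*x)
}
```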
+- name: glu_out
+  operator_name: glu
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: glu
+  operator_name: glu
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::glu(Tensor self, int dim=-1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: glu_backward_out
+  operator_name: glu_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: glu_backward
+  operator_name: glu_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
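glu splits its input into two halves along `dim` (default -1, the last dimension) and gates the first half with the sigmoid of the second. A Go reference over a flat slice:

```go
package ref

import "math"

// glu mirrors aten::glu over the last axis of a flat slice:
// out[i] = a[i] * sigmoid(b[i]), where a and b are the two halves of x.
func glu(x []float64) []float64 {
	n := len(x) / 2
	out := make([]float64, n)
	for i := 0; i < n; i++ {
		out[i] = x[i] / (1 + math.Exp(-x[i+n]))
	}
	return out
}
```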
+- name: hardsigmoid_out
+  operator_name: hardsigmoid
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardsigmoid
+  operator_name: hardsigmoid
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardsigmoid(Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardsigmoid_
+  operator_name: hardsigmoid_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardsigmoid_backward_out
+  operator_name: hardsigmoid_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardsigmoid_backward
+  operator_name: hardsigmoid_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
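hardsigmoid is the piecewise-linear approximation clamp(x/6 + 1/2, 0, 1); the out, functional, and in-place hardsigmoid_ variants above all share it. A Go reference:

```go
package ref

import "math"

// hardsigmoid mirrors aten::hardsigmoid: clamp(x/6 + 0.5, 0, 1).
func hardsigmoid(x float64) float64 {
	return math.Min(1, math.Max(0, x/6+0.5))
}
```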
+- name: hardtanh_out
+  operator_name: hardtanh
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardtanh
+  operator_name: hardtanh
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardtanh_backward_out
+  operator_name: hardtanh_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardtanh_backward
+  operator_name: hardtanh_backward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: hardtanh_
+  operator_name: hardtanh_
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)
+  arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &)
+  schema_order_arguments:
+  - annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: at::Tensor &
+  - annotation: null
+    default: -1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: min_val
+    type: const at::Scalar &
+  - annotation: null
+    default: 1
+    dynamic_type: const at::Scalar &
+    is_nullable: false
+    name: max_val
+    type: const at::Scalar &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: self
+    type: at::Tensor &
+  inplace: true
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: -1 + dynamic_type: const at::Scalar & + is_nullable: false + name: min_val + type: const at::Scalar & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: max_val + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish_out + operator_name: hardswish + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish + operator_name: hardswish + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardswish(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish_ + operator_name: hardswish_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardswish_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish_backward + operator_name: hardswish_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_out + operator_name: leaky_relu + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.01 + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.01 + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu + operator_name: leaky_relu + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.01 + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.01 + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_backward_out + operator_name: leaky_relu_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_backward + operator_name: leaky_relu_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_ + operator_name: leaky_relu_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0.01 + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + default: 0.01 + dynamic_type: const at::Scalar & + is_nullable: false + name: negative_slope + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_out + operator_name: log_sigmoid + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: log_sigmoid
+ operator_name: log_sigmoid
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::log_sigmoid(Tensor self) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: false
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: true
+- name: log_sigmoid_forward_out
+ operator_name: log_sigmoid_forward
+ overload_name: output
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: output
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: buffer
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: output
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: buffer
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: output
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ name: buffer
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: log_sigmoid_forward
+ operator_name: log_sigmoid_forward
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ field_name: output
+ name: output
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ field_name: buffer
+ name: buffer
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: log_sigmoid_backward_out
+ operator_name: log_sigmoid_backward
+ overload_name: grad_input
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: buffer
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: buffer
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_backward + operator_name: log_sigmoid_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: buffer + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: buffer + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rrelu_with_noise_out + operator_name: rrelu_with_noise + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ default: 0.125
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ default: 0.3333333333333333
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ name: generator
+ type: c10::optional<at::Generator>
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ default: 0.125
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ default: 0.3333333333333333
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ name: generator
+ type: c10::optional<at::Generator>
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: rrelu_with_noise
+ operator_name: rrelu_with_noise
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ default: 0.125
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ default: 0.3333333333333333
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ name: generator
+ type: c10::optional<at::Generator>
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ default: 0.125
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ default: 0.3333333333333333
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ name: generator
+ type: c10::optional<at::Generator>
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: rrelu_with_noise_backward
+ operator_name: rrelu_with_noise_backward
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: self_is_result
+ type: bool
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, bool)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: self_is_result
+ type: bool
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: rrelu_with_noise_
+ operator_name: rrelu_with_noise_
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
+ arguments:
+ - annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ default: 0.125
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ default: 0.3333333333333333
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ name: generator
+ type: c10::optional<at::Generator>
+ schema_order_cpp_signature: at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, bool, c10::optional<at::Generator>)
+ schema_order_arguments:
+ - annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: noise
+ type: const at::Tensor &
+ - annotation: null
+ default: 0.125
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: lower
+ type: const at::Scalar &
+ - annotation: null
+ default: 0.3333333333333333
+ dynamic_type: const at::Scalar &
+ is_nullable: false
+ name: upper
+ type: const at::Scalar &
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: training
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: at::Generator
+ is_nullable: true
+ name: generator
+ type: c10::optional<at::Generator>
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: self
+ type: at::Tensor &
+ inplace: true
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: softplus_out
+ operator_name: softplus
+ overload_name: out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + default: 20 + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + default: 20 + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus + operator_name: softplus + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + default: 20 + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + default: 20 + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus_backward_out + operator_name: softplus_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus_backward + operator_name: softplus_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: beta + type: const at::Scalar & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: threshold + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink_out + operator_name: softshrink + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink + operator_name: softshrink + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0.5 + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink_backward_out + operator_name: softshrink_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink_backward + operator_name: softshrink_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: lambd + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool2d_out + operator_name: adaptive_avg_pool2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool2d + operator_name: adaptive_avg_pool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: mkldnn_adaptive_avg_pool2d + operator_name: mkldnn_adaptive_avg_pool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_adaptive_avg_pool2d_backward + operator_name: mkldnn_adaptive_avg_pool2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: 
'' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _adaptive_avg_pool2d + operator_name: _adaptive_avg_pool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _adaptive_avg_pool2d_backward + operator_name: _adaptive_avg_pool2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d_out + operator_name: adaptive_avg_pool3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d + operator_name: adaptive_avg_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _adaptive_avg_pool3d + operator_name: _adaptive_avg_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d_backward_out + operator_name: adaptive_avg_pool3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: grad_input
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: _adaptive_avg_pool3d_backward
+ operator_name: _adaptive_avg_pool3d_backward
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: adaptive_max_pool2d_out
+ operator_name: adaptive_max_pool2d
+ overload_name: out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ name: indices
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: adaptive_max_pool2d
+ operator_name: adaptive_max_pool2d
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result0
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ name: result1
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: adaptive_max_pool2d_backward_out
+ operator_name: adaptive_max_pool2d_backward
+ overload_name: grad_input
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: grad_input
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: adaptive_max_pool2d_backward
+ operator_name: adaptive_max_pool2d_backward
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: adaptive_max_pool3d_out
+ operator_name: adaptive_max_pool3d
+ overload_name: out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + - dynamic_type: at::Tensor + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d + operator_name: adaptive_max_pool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d_backward_out + operator_name: adaptive_max_pool3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d_backward + operator_name: adaptive_max_pool3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool2d_out + operator_name: avg_pool2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool2d
+ operator_name: avg_pool2d
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool2d_backward_out
+ operator_name: avg_pool2d_backward
+ overload_name: grad_input
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: grad_input
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool2d_backward
+ operator_name: avg_pool2d_backward
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool3d_out
+ operator_name: avg_pool3d
+ overload_name: out
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: out
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: out
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool3d
+ operator_name: avg_pool3d
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: '{}'
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: 0
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ default: false
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ default: true
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ default: c10::nullopt
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool3d_backward_out
+ operator_name: avg_pool3d_backward
+ overload_name: grad_input
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: grad_input
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: avg_pool3d_backward
+ operator_name: avg_pool3d_backward
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: stride
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: padding
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: ceil_mode
+ type: bool
+ - annotation: null
+ dynamic_type: bool
+ is_nullable: false
+ name: count_include_pad
+ type: bool
+ - annotation: null
+ dynamic_type: int64_t
+ is_nullable: true
+ name: divisor_override
+ type: c10::optional
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: fractional_max_pool2d_out
+ operator_name: fractional_max_pool2d
+ overload_name: output
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: output
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: output
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: output
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ name: indices
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: fractional_max_pool2d
+ operator_name: fractional_max_pool2d
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result0
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ name: result1
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: fractional_max_pool2d_backward_out
+ operator_name: fractional_max_pool2d_backward
+ overload_name: grad_input
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 2
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool2d_backward + operator_name: fractional_max_pool2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool3d_out + operator_name: fractional_max_pool3d + overload_name: output + manual_kernel_registration: false + category_override: '' + schema_string: aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: output
+ output: true
+ type: at::Tensor &
+ - allocate: true
+ annotation: b!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ output: true
+ type: at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: output
+ type: at::Tensor &
+ - dynamic_type: at::Tensor
+ name: indices
+ type: at::Tensor &
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: fractional_max_pool3d
+ operator_name: fractional_max_pool3d
+ overload_name: ''
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)
+ arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: random_samples
+ type: const at::Tensor &
+ method_of:
+ - Type
+ - namespace
+ mode: native
+ python_module: nn
+ returns:
+ - dynamic_type: at::Tensor
+ name: result0
+ type: at::Tensor
+ - dynamic_type: at::Tensor
+ name: result1
+ type: at::Tensor
+ inplace: false
+ is_factory_method: false
+ abstract: true
+ device_guard: true
+ with_gil: false
+ deprecated: false
+ has_math_kernel: false
+- name: fractional_max_pool3d_backward_out
+ operator_name: fractional_max_pool3d_backward
+ overload_name: grad_input
+ manual_kernel_registration: false
+ category_override: ''
+ schema_string: aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ arguments:
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_input
+ output: true
+ type: at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &)
+ schema_order_arguments:
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: grad_output
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: self
+ type: const at::Tensor &
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: kernel_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::IntArrayRef
+ is_nullable: false
+ name: output_size
+ size: 3
+ type: at::IntArrayRef
+ - annotation: null
+ dynamic_type: at::Tensor
+ is_nullable: false
+ name: indices
+ type: const at::Tensor &
+ - allocate: true
+ annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool3d_backward + operator_name: fractional_max_pool3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices_out + operator_name: max_pool2d_with_indices + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + - dynamic_type: at::Tensor + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices + operator_name: max_pool2d_with_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices_backward_out + operator_name: max_pool2d_with_indices_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices_backward + operator_name: max_pool2d_with_indices_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices_out + operator_name: max_pool3d_with_indices + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: indices + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + is_nullable: false + name: indices + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + - dynamic_type: at::Tensor + name: indices + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices + operator_name: max_pool3d_with_indices + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices_backward_out + operator_name: max_pool3d_with_indices_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices_backward + operator_name: max_pool3d_with_indices_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d_out + operator_name: max_unpool2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d + operator_name: max_unpool2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d_backward_out + operator_name: max_unpool2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d_backward + operator_name: max_unpool2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d_out + operator_name: max_unpool3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) 
out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d + operator_name: max_unpool3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + 
type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d_backward_out + operator_name: max_unpool3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d_backward + operator_name: max_unpool3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: indices + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d_out + operator_name: reflection_pad1d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d + operator_name: reflection_pad1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad1d(Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d_backward_out + operator_name: reflection_pad1d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d_backward + operator_name: reflection_pad1d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d_out + operator_name: reflection_pad2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d + operator_name: reflection_pad2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad2d(Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d_backward_out + operator_name: reflection_pad2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d_backward + operator_name: reflection_pad2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad3d_out + operator_name: reflection_pad3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad3d + operator_name: reflection_pad3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad3d(Tensor self, int[6] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad3d_backward_out + operator_name: reflection_pad3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad3d_backward + operator_name: reflection_pad3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d_out + operator_name: replication_pad1d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d + operator_name: replication_pad1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad1d(Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d_backward_out + operator_name: replication_pad1d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d_backward + operator_name: replication_pad1d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d_out + operator_name: replication_pad2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d + operator_name: replication_pad2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad2d(Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d_backward_out + operator_name: replication_pad2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d_backward + operator_name: replication_pad2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 4 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d_out + operator_name: replication_pad3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d + operator_name: replication_pad3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad3d(Tensor self, int[6] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d_backward_out + operator_name: replication_pad3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + - allocate: true + annotation: a! 
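+# The `size:` field records the fixed arity of an `int[N]` schema list:
+# replication_pad2d takes `int[4]` padding (left, right, top, bottom) while
+# the 3d variants take `int[6]` (left, right, top, bottom, front, back), so
+# a binding generator can check slice lengths up front before crossing into C.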
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d_backward + operator_name: replication_pad3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 6 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d + operator_name: upsample_linear1d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: true
+    name: output_size
+    type: c10::optional<at::IntArrayRef>
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    dynamic_type: at::ArrayRef<double>
+    is_nullable: true
+    name: scale_factors
+    type: c10::optional<at::ArrayRef<double>>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::IntArrayRef>, bool, c10::optional<at::ArrayRef<double>>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: input
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: true
+    name: output_size
+    type: c10::optional<at::IntArrayRef>
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    dynamic_type: at::ArrayRef<double>
+    is_nullable: true
+    name: scale_factors
+    type: c10::optional<at::ArrayRef<double>>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: upsample_linear1d_backward
+  operator_name: upsample_linear1d_backward
+  overload_name: vec
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::upsample_linear1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: true
+    name: output_size
+    type: c10::optional<at::IntArrayRef>
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: input_size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    dynamic_type: at::ArrayRef<double>
+    is_nullable: true
+    name: scale_factors
+    type: c10::optional<at::ArrayRef<double>>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::IntArrayRef>, at::IntArrayRef, bool, c10::optional<at::ArrayRef<double>>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: true
+    name: output_size
+    type: c10::optional<at::IntArrayRef>
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: input_size
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    dynamic_type: at::ArrayRef<double>
+    is_nullable: true
+    name: scale_factors
+    type: c10::optional<at::ArrayRef<double>>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: upsample_bilinear2d
+  operator_name: upsample_bilinear2d
+  overload_name: vec
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]?
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_backward + operator_name: upsample_bilinear2d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bilinear2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bilinear2d_aa + operator_name: _upsample_bilinear2d_aa + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bilinear2d_aa.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bilinear2d_aa_backward + operator_name: _upsample_bilinear2d_aa_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bilinear2d_aa_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d + operator_name: upsample_trilinear3d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d_backward + operator_name: upsample_trilinear3d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_trilinear3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d + operator_name: upsample_bicubic2d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bicubic2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_backward + operator_name: upsample_bicubic2d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bicubic2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bicubic2d_aa + operator_name: _upsample_bicubic2d_aa + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bicubic2d_aa.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bicubic2d_aa_backward + operator_name: _upsample_bicubic2d_aa_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bicubic2d_aa_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d + operator_name: upsample_nearest1d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact1d + operator_name: _upsample_nearest_exact1d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d_backward + operator_name: upsample_nearest1d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact1d_backward + operator_name: _upsample_nearest_exact1d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d + operator_name: upsample_nearest2d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact2d + operator_name: _upsample_nearest_exact2d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d_backward + operator_name: upsample_nearest2d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact2d_backward + operator_name: _upsample_nearest_exact2d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d + operator_name: upsample_nearest3d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact3d + operator_name: _upsample_nearest_exact3d + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d_backward + operator_name: upsample_nearest3d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact3d_backward + operator_name: _upsample_nearest_exact3d_backward + overload_name: vec + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + type: at::IntArrayRef + - annotation: null + dynamic_type: at::ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d_out + operator_name: upsample_linear1d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
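+# The entries above are the `.vec` overload family, whose `output_size` and
+# `scale_factors` are both nullable: callers pass exactly one and the kernel
+# derives the other. This `.out` entry and those below are the fixed-rank
+# forms (`int[1]` output_size plus per-dimension `float? scales`). A hedged
+# Go sketch of the caller-side arithmetic (hypothetical helper, not
+# generated code):
+#
+#   // resolveOutputSize mirrors the floor(input_size * scale) rule used by
+#   // the upsample kernels for a single dimension.
+#   func resolveOutputSize(inputSize int64, outputSize *int64, scale *float64) int64 {
+#   	if outputSize != nil {
+#   		return *outputSize
+#   	}
+#   	return int64(float64(inputSize) * *scale)
+#   }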
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: scales
+    type: c10::optional<double>
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional<double>, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: scales
+    type: c10::optional<double>
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: upsample_linear1d
+  operator_name: upsample_linear1d
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: scales
+    type: c10::optional<double>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional<double>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: bool
+    is_nullable: false
+    name: align_corners
+    type: bool
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: double
+    is_nullable: true
+    name: scales
+    type: c10::optional<double>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: upsample_linear1d_backward_out
+  operator_name: upsample_linear1d_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!)
grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d_backward + operator_name: upsample_linear1d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? 
scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_out + operator_name: upsample_bilinear2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! 
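+# Note the two argument orders recorded per entry: `arguments` lists the out
+# tensor first (the C++ out-variant convention), while `schema_order_arguments`
+# and `schema_order_cpp_signature` keep it last, matching the Python schema
+# `(..., *, Tensor(a!) out)`; a code generator picks whichever order its
+# calling convention needs.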
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d + operator_name: upsample_bilinear2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_backward_out + operator_name: upsample_bilinear2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
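+# Backward declarations carry `input_size` (here `int[4]`, the full NCHW
+# shape) alongside `output_size` because grad_input must be materialized at
+# the original input shape, which cannot be recovered exactly from
+# grad_output and the scales alone once the forward size was floored.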
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_backward + operator_name: upsample_bilinear2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bilinear2d_aa_out + operator_name: _upsample_bilinear2d_aa + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bilinear2d_aa.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bilinear2d_aa + operator_name: _upsample_bilinear2d_aa + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bilinear2d_aa(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bilinear2d_aa_backward_out + operator_name: _upsample_bilinear2d_aa_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bilinear2d_aa_backward + operator_name: _upsample_bilinear2d_aa_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_out + operator_name: upsample_bicubic2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d + operator_name: upsample_bicubic2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_backward_out + operator_name: upsample_bicubic2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_backward + operator_name: upsample_bicubic2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bicubic2d_aa_out + operator_name: _upsample_bicubic2d_aa + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bicubic2d_aa.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bicubic2d_aa + operator_name: _upsample_bicubic2d_aa + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bicubic2d_aa(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bicubic2d_aa_backward_out + operator_name: _upsample_bicubic2d_aa_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_bicubic2d_aa_backward + operator_name: _upsample_bicubic2d_aa_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d_out + operator_name: upsample_trilinear3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d + operator_name: upsample_trilinear3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d_backward_out + operator_name: upsample_trilinear3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d_backward + operator_name: upsample_trilinear3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, bool, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d_out + operator_name: upsample_nearest1d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact1d_out + operator_name: _upsample_nearest_exact1d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact1d.out(Tensor self, int[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d + operator_name: upsample_nearest1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest1d(Tensor self, int[1] output_size, float? 
scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact1d + operator_name: _upsample_nearest_exact1d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d_backward_out + operator_name: upsample_nearest1d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact1d_backward_out + operator_name: _upsample_nearest_exact1d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d_backward + operator_name: upsample_nearest1d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact1d_backward + operator_name: _upsample_nearest_exact1d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? 
scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d_out + operator_name: upsample_nearest2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest2d.out(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact2d_out + operator_name: _upsample_nearest_exact2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact2d.out(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d + operator_name: upsample_nearest2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact2d + operator_name: _upsample_nearest_exact2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d_backward_out + operator_name: upsample_nearest2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact2d_backward_out + operator_name: _upsample_nearest_exact2d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d_backward + operator_name: upsample_nearest2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact2d_backward + operator_name: _upsample_nearest_exact2d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d_out + operator_name: upsample_nearest3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact3d_out + operator_name: _upsample_nearest_exact3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact3d.out(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d + operator_name: upsample_nearest3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact3d + operator_name: _upsample_nearest_exact3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d_backward_out + operator_name: upsample_nearest3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact3d_backward_out + operator_name: _upsample_nearest_exact3d_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d_backward + operator_name: upsample_nearest3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _upsample_nearest_exact3d_backward + operator_name: _upsample_nearest_exact3d_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_upsample_nearest_exact3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 5 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_d + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sigmoid_backward_out + operator_name: sigmoid_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sigmoid_backward + operator_name: sigmoid_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit_backward_out + operator_name: logit_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit_backward + operator_name: logit_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::logit_backward(Tensor grad_output, Tensor self, float? 
eps=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tanh_backward_out + operator_name: tanh_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tanh_backward + operator_name: tanh_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_transpose2d_out + operator_name: slow_conv_transpose2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_transpose2d + operator_name: slow_conv_transpose2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_transpose3d_out + operator_name: slow_conv_transpose3d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_transpose3d + operator_name: slow_conv_transpose3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d_out + operator_name: thnn_conv2d + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: thnn_conv2d + operator_name: thnn_conv2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: _slow_conv2d_forward_out
+  operator_name: _slow_conv2d_forward
+  overload_name: output
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: output
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: output
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _slow_conv2d_forward
+  operator_name: _slow_conv2d_forward
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _slow_conv2d_backward_out
+  operator_name: _slow_conv2d_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_weight
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: c!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_bias
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &, at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_input
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: b!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_weight
+    output: true
+    type: at::Tensor &
+  - allocate: true
+    annotation: c!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_bias
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: grad_input
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    name: grad_weight
+    type: at::Tensor &
+  - dynamic_type: at::Tensor
+    name: grad_bias
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _slow_conv2d_backward
+  operator_name: _slow_conv2d_backward
+  overload_name: output_mask
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: ::std::array<bool,3>
+    is_nullable: false
+    name: output_mask
+    type: ::std::array<bool,3>
+  schema_order_cpp_signature: ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, ::std::array<bool,3>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: grad_output
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: ::std::array<bool,3>
+    is_nullable: false
+    name: output_mask
+    type: ::std::array<bool,3>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    field_name: grad_input
+    name: grad_input
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: grad_weight
+    name: grad_weight
+    type: at::Tensor
+  - dynamic_type: at::Tensor
+    field_name: grad_bias
+    name: grad_bias
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _conv_depthwise2d_out
+  operator_name: _conv_depthwise2d
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, const at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: const at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: const at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: _conv_depthwise2d
+  operator_name: _conv_depthwise2d
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: conv_depthwise3d
+  operator_name: conv_depthwise3d
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 3
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 3
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: slow_conv3d_out
+  operator_name: slow_conv3d
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 3
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 3
+    type: at::IntArrayRef
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: slow_conv3d
+  operator_name: slow_conv3d
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 3
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: weight
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: '{}'
+    dynamic_type: at::Tensor
+    is_nullable: true
+    name: bias
+    type: const c10::optional<at::Tensor> &
+  - annotation: null
+    default: 1
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 3
+    type: at::IntArrayRef
+  - annotation: null
+    default: 0
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 3
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: slow_conv3d_forward_out
+  operator_name: slow_conv3d_forward
+  overload_name: output
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: output + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: output + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d_forward + operator_name: slow_conv3d_forward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias, int[3] stride, int[3] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_dilated2d + operator_name: slow_conv_dilated2d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_dilated3d + operator_name: slow_conv_dilated3d + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: weight + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: at::IntArrayRef + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + name: bias + type: const c10::optional & + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 3 + type: at::IntArrayRef + - annotation: null + default: 0 + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 3 + type: at::IntArrayRef + - annotation: null + default: 1 + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: col2im_out + operator_name: col2im + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: col2im
+  operator_name: col2im
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: output_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: kernel_size
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dilation
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: padding
+    size: 2
+    type: at::IntArrayRef
+  - annotation: null
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: stride
+    size: 2
+    type: at::IntArrayRef
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: nn
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: true
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: false
+- name: col2im_backward_out
+  operator_name: col2im_backward
+  overload_name: grad_input
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: col2im_backward + operator_name: col2im_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + 
is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: column_stack + operator_name: column_stack + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::column_stack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: column_stack_out + operator_name: column_stack + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: im2col_out + operator_name: im2col + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col + operator_name: im2col + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + method_of: + - 
Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col_backward_out + operator_name: im2col_backward + overload_name: grad_input + manual_kernel_registration: false + category_override: '' + schema_string: aten::im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: grad_input + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: grad_input + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col_backward + operator_name: im2col_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad_output + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: padding + size: 2 + type: at::IntArrayRef + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: stride + size: 2 + type: at::IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isfinite + operator_name: isfinite + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::isfinite(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: true +- name: isinf + operator_name: isinf + overload_name: '' + manual_kernel_registration: false + 
category_override: '' + schema_string: aten::isinf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: record_stream + operator_name: record_stream + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::record_stream(Tensor(a!) self, Stream s) -> () + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Stream + is_nullable: false + name: s + type: at::Stream + schema_order_cpp_signature: void (at::Tensor &, at::Stream) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: null + dynamic_type: at::Stream + is_nullable: false + name: s + type: at::Stream + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isposinf + operator_name: isposinf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::isposinf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isposinf_out + operator_name: isposinf + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isneginf + operator_name: isneginf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::isneginf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isneginf_out + operator_name: isneginf + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_batch_dim + operator_name: _add_batch_dim + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _remove_batch_dim + operator_name: _remove_batch_dim + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_entr + operator_name: special_entr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_entr(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + 
is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_entr_out + operator_name: special_entr + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_ndtri + operator_name: special_ndtri + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_ndtri(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_ndtri_out + operator_name: special_ndtri + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
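+ # The `special_*` declarations in this stretch populate the torch.special
+ # Python namespace (`python_module: special`). The trailing flags split them
+ # into two kinds: `abstract: true` with `has_math_kernel: false` (e.g.
+ # special_entr, special_ndtri) means the op needs dedicated backend kernels,
+ # while `abstract: false` with `has_math_kernel: true` (e.g. special_expm1
+ # below) marks a composite implemented in terms of other ATen ops.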
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_expm1 + operator_name: special_expm1 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_expm1(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_expm1_out + operator_name: special_expm1 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_exp2 + operator_name: special_exp2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_exp2(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_exp2_out + operator_name: special_exp2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_psi + operator_name: special_psi + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_psi(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_psi_out + operator_name: special_psi + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_digamma + operator_name: special_digamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_digamma(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_digamma_out + operator_name: special_digamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_gammaln + operator_name: special_gammaln + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_gammaln(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_gammaln_out + operator_name: special_gammaln + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_erf + operator_name: special_erf + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_erf_out + operator_name: special_erf + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_erfc + operator_name: special_erfc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erfc(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_erfc_out + operator_name: special_erfc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_erfcx + operator_name: special_erfcx + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erfcx(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_erfcx_out + operator_name: special_erfcx + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_erfinv + operator_name: special_erfinv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erfinv(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_erfinv_out + operator_name: special_erfinv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_ndtr + operator_name: special_ndtr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_ndtr(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_ndtr_out + operator_name: special_ndtr + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_xlog1py + operator_name: special_xlog1py + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlog1py(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_xlog1py + operator_name: special_xlog1py + overload_name: self_scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_xlog1py + operator_name: special_xlog1py + overload_name: other_scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + 
is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_xlog1py_out + operator_name: special_xlog1py + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_xlog1py_out + operator_name: special_xlog1py + overload_name: self_scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_xlog1py_out + operator_name: special_xlog1py + overload_name: other_scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
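+ # Mixed tensor/scalar binaries such as special_xlog1py expand into six
+ # declarations: the Tensor-Tensor base plus `self_scalar` and `other_scalar`
+ # overloads, each with an `.out` counterpart. `overload_name` is the only
+ # field that disambiguates entries sharing one `operator_name`.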
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_xlogy + operator_name: special_xlogy + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlogy(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_xlogy + operator_name: special_xlogy + overload_name: self_scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_xlogy + operator_name: special_xlogy + overload_name: other_scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + 
is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_xlogy_out + operator_name: special_xlogy + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_xlogy_out + operator_name: special_xlogy + overload_name: self_scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_xlogy_out + operator_name: special_xlogy + overload_name: other_scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_zeta + operator_name: special_zeta + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_zeta(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_zeta + operator_name: special_zeta + overload_name: self_scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Scalar &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & 
+ - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_zeta + operator_name: special_zeta + overload_name: other_scalar + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_zeta_out + operator_name: special_zeta + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_zeta_out + operator_name: special_zeta + overload_name: self_scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: self + type: const at::Scalar & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_zeta_out + operator_name: special_zeta + overload_name: other_scalar_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: other + type: const at::Scalar & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_i0 + operator_name: special_i0 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i0(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_i0_out + operator_name: special_i0 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
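+ # Flag split within one family: special_i0 above is a composite
+ # (`has_math_kernel: true`), while special_i0e, special_i1 and special_i1e
+ # below are `abstract: true` and rely on per-backend kernels.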
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_i0e + operator_name: special_i0e + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i0e(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_i0e_out + operator_name: special_i0e + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_i1 + operator_name: special_i1 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i1(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_i1_out + operator_name: special_i1 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_i1e + operator_name: special_i1e + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i1e(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_i1e_out + operator_name: special_i1e + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: special_logit + operator_name: special_logit + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_logit(Tensor self, float? eps=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional<double> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<double>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional<double> + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_logit_out + operator_name: special_logit + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional<double> + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional<double>, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional<double> + - allocate: true + annotation: a!
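+ # special_logit's `eps` is a nullable double (`is_nullable: true`, default
+ # `c10::nullopt`, C++ type `c10::optional<double>`). Declarations like this
+ # are what the binding generator lowers to optional parameters in the emitted
+ # Go wrappers; the exact Go spelling lives in ts/tensor-generated.go.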
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_polygamma + operator_name: special_polygamma + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_polygamma(int n, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (int64_t, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_polygamma_out + operator_name: special_polygamma + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (int64_t, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
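+ # special_polygamma is one of the few entries in this stretch with `Tensor`
+ # in `method_of`, so it is exposed as a Tensor method as well as a namespace
+ # function; note that its integer order `n` precedes `self` in the schema.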
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_logsumexp + operator_name: special_logsumexp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, at::IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_logsumexp_out + operator_name: special_logsumexp + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_expit + operator_name: special_expit + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_expit(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_expit_out + operator_name: special_expit + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_sinc + operator_name: special_sinc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_sinc(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_sinc_out + operator_name: special_sinc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_round + operator_name: special_round + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_round(Tensor self, *, int decimals=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_round_out + operator_name: special_round + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: decimals + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_log1p + operator_name: special_log1p + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_log1p(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_log1p_out + operator_name: special_log1p + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_log_softmax + operator_name: special_log_softmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_log_softmax(Tensor self, int dim, *, ScalarType? 
+    dtype=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional<at::ScalarType>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: int64_t
+    is_nullable: false
+    name: dim
+    type: int64_t
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::ScalarType
+    is_nullable: true
+    kwarg_only: true
+    name: dtype
+    type: c10::optional<at::ScalarType>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: special
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: special_gammainc_out
+  operator_name: special_gammainc
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: other
+    type: const at::Tensor &
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_gammainc + operator_name: special_gammainc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_gammainc(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_gammaincc_out + operator_name: special_gammaincc + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_gammaincc + operator_name: special_gammaincc + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_gammaincc(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_multigammaln + operator_name: special_multigammaln + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_multigammaln(Tensor self, int p) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_multigammaln_out + operator_name: special_multigammaln + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: special_softmax + operator_name: special_softmax + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: special + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fft + operator_name: fft_fft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fft_out + operator_name: fft_fft + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifft + operator_name: fft_ifft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifft_out + operator_name: fft_ifft + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_rfft + operator_name: fft_rfft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_rfft_out + operator_name: fft_rfft + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_irfft + operator_name: fft_irfft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_irfft_out + operator_name: fft_irfft + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_hfft + operator_name: fft_hfft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_hfft_out + operator_name: fft_hfft + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ihfft + operator_name: fft_ihfft + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ihfft_out + operator_name: fft_ihfft + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, int64_t, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fft2 + operator_name: fft_fft2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fft2_out + operator_name: fft_fft2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifft2 + operator_name: fft_ifft2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifft2_out + operator_name: fft_ifft2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! 
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: out
+    output: true
+    type: at::Tensor &
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: fft
+  returns:
+  - dynamic_type: at::Tensor
+    name: out
+    type: at::Tensor &
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: fft_rfft2
+  operator_name: fft_rfft2
+  overload_name: ''
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+  arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::IntArrayRef
+    is_nullable: true
+    name: s
+    size: 1
+    type: c10::optional<at::IntArrayRef>
+  - annotation: null
+    default: '{-2,-1}'
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: c10::string_view
+    is_nullable: true
+    name: norm
+    type: c10::optional<c10::string_view>
+  schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::IntArrayRef>, at::IntArrayRef, c10::optional<c10::string_view>)
+  schema_order_arguments:
+  - annotation: null
+    dynamic_type: at::Tensor
+    is_nullable: false
+    name: self
+    type: const at::Tensor &
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: at::IntArrayRef
+    is_nullable: true
+    name: s
+    size: 1
+    type: c10::optional<at::IntArrayRef>
+  - annotation: null
+    default: '{-2,-1}'
+    dynamic_type: at::IntArrayRef
+    is_nullable: false
+    name: dim
+    size: 1
+    type: at::IntArrayRef
+  - annotation: null
+    default: c10::nullopt
+    dynamic_type: c10::string_view
+    is_nullable: true
+    name: norm
+    type: c10::optional<c10::string_view>
+  method_of:
+  - Type
+  - namespace
+  mode: native
+  python_module: fft
+  returns:
+  - dynamic_type: at::Tensor
+    name: result
+    type: at::Tensor
+  inplace: false
+  is_factory_method: false
+  abstract: false
+  device_guard: true
+  with_gil: false
+  deprecated: false
+  has_math_kernel: true
+- name: fft_rfft2_out
+  operator_name: fft_rfft2
+  overload_name: out
+  manual_kernel_registration: false
+  category_override: ''
+  schema_string: aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+  arguments:
+  - allocate: true
+    annotation: a!
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_irfft2 + operator_name: fft_irfft2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_irfft2_out + operator_name: fft_irfft2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_hfft2 + operator_name: fft_hfft2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_hfft2_out + operator_name: fft_hfft2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: const at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ihfft2 + operator_name: fft_ihfft2 + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ihfft2_out + operator_name: fft_ihfft2 + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, c10::optional, at::IntArrayRef, c10::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + size: 1 + type: at::IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: const at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fftn + operator_name: fft_fftn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fftn_out + operator_name: fft_fftn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifftn + operator_name: fft_ifftn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifftn_out + operator_name: fft_ifftn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_rfftn + operator_name: fft_rfftn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_rfftn_out + operator_name: fft_rfftn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_irfftn + operator_name: fft_irfftn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_irfftn_out + operator_name: fft_irfftn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_hfftn + operator_name: fft_hfftn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_hfftn_out + operator_name: fft_hfftn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: const at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ihfftn + operator_name: fft_ihfftn + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ihfftn_out + operator_name: fft_ihfftn + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: const at::Tensor & (const at::Tensor &, c10::optional, c10::optional, c10::optional, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + name: norm + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: const at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fftfreq + operator_name: fft_fftfreq + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fftfreq_out + operator_name: fft_fftfreq + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + schema_order_cpp_signature: at::Tensor & (int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_rfftfreq + operator_name: fft_rfftfreq + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + - annotation: null + default: '{}' + dynamic_type: at::TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: at::TensorOptions + schema_order_cpp_signature: at::Tensor (int64_t, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: at::Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_rfftfreq_out + operator_name: fft_rfftfreq + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + schema_order_cpp_signature: at::Tensor & (int64_t, double, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: d + type: double + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_fftshift + operator_name: fft_fftshift + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: fft_ifftshift + operator_name: fft_ifftshift + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cholesky_ex + operator_name: linalg_cholesky_ex + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + 
is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: L + name: L + type: at::Tensor + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_cholesky_ex_out + operator_name: linalg_cholesky_ex + overload_name: L + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: L + is_nullable: false + name: L + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: info + is_nullable: false + name: info + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: L + is_nullable: false + name: L + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: info + is_nullable: false + name: info + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: L + name: L + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_cholesky + operator_name: linalg_cholesky + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cholesky_out + operator_name: linalg_cholesky + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: upper + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cross + operator_name: linalg_cross + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_cross_out + operator_name: linalg_cross + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_lu_factor + operator_name: linalg_lu_factor + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: LU + name: LU + type: at::Tensor + - dynamic_type: at::Tensor + field_name: pivots + name: pivots + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_lu_factor_out + operator_name: linalg_lu_factor + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: LU + is_nullable: false + name: LU + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: pivots + is_nullable: false + name: pivots + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: LU + is_nullable: false + name: LU + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: pivots + is_nullable: false + name: pivots + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: LU + name: LU + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: pivots + name: pivots + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_lu_factor_ex + operator_name: linalg_lu_factor_ex + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: LU + name: LU + type: at::Tensor + - dynamic_type: at::Tensor + field_name: pivots + name: pivots + type: at::Tensor + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_lu_factor_ex_out + operator_name: linalg_lu_factor_ex + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: LU + is_nullable: false + name: LU + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: pivots + is_nullable: false + name: pivots + output: true + type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + field_name: info + is_nullable: false + name: info + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: pivot + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: LU + is_nullable: false + name: LU + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: pivots + is_nullable: false + name: pivots + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: info + is_nullable: false + name: info + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: LU + name: LU + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: pivots + name: pivots + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_det + operator_name: linalg_det + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_det(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_det_out + operator_name: linalg_det + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_det.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: det + operator_name: det + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::det(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _det_lu_based_helper + operator_name: _det_lu_based_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_det_lu_based_helper(Tensor self) -> (Tensor det, Tensor lu, Tensor pivs) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: det + name: det + type: at::Tensor + - dynamic_type: at::Tensor + field_name: lu + name: lu + type: at::Tensor + - dynamic_type: at::Tensor + field_name: pivs + name: pivs + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _det_lu_based_helper_backward_helper + operator_name: _det_lu_based_helper_backward_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_det_lu_based_helper_backward_helper(Tensor det_grad, Tensor det, Tensor self, Tensor lu, Tensor pivs) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: det_grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: det + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: lu + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: pivs + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: det_grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: det + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: 
const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: lu + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: pivs + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_lstsq + operator_name: linalg_lstsq + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: b + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: rcond + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: driver + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: b + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: rcond + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: driver + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: solution + name: solution + type: at::Tensor + - dynamic_type: at::Tensor + field_name: residuals + name: residuals + type: at::Tensor + - dynamic_type: at::Tensor + field_name: rank + name: rank + type: at::Tensor + - dynamic_type: at::Tensor + field_name: singular_values + name: singular_values + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_lstsq_out + operator_name: linalg_lstsq + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: solution + is_nullable: false + name: solution + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: residuals + is_nullable: false + name: residuals + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: rank + is_nullable: false + name: rank + output: true + type: at::Tensor & + - allocate: true + annotation: d! 
+ dynamic_type: at::Tensor + field_name: singular_values + is_nullable: false + name: singular_values + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: b + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: rcond + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: driver + type: c10::optional + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, const at::Tensor &, c10::optional, c10::optional, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: b + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: rcond + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: c10::string_view + is_nullable: true + kwarg_only: true + name: driver + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: solution + is_nullable: false + name: solution + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: residuals + is_nullable: false + name: residuals + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: rank + is_nullable: false + name: rank + output: true + type: at::Tensor & + - allocate: true + annotation: d! 
+ dynamic_type: at::Tensor + field_name: singular_values + is_nullable: false + name: singular_values + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: solution + name: solution + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: residuals + name: residuals + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: rank + name: rank + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: singular_values + name: singular_values + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_matmul + operator_name: linalg_matmul + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matmul(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matmul_out + operator_name: linalg_matmul + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_exp + operator_name: linalg_matrix_exp + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_exp(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_slogdet + operator_name: linalg_slogdet + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: sign + name: sign + type: at::Tensor + - dynamic_type: at::Tensor + field_name: logabsdet + name: logabsdet + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_slogdet_out + operator_name: linalg_slogdet + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: sign + is_nullable: false + name: sign + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: logabsdet + is_nullable: false + name: logabsdet + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: sign + is_nullable: false + name: sign + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: logabsdet + is_nullable: false + name: logabsdet + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: sign + name: sign + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: logabsdet + name: logabsdet + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_eig + operator_name: linalg_eig + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: eigenvalues + type: at::Tensor + - dynamic_type: at::Tensor + field_name: eigenvectors + name: eigenvectors + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_eig_out + operator_name: linalg_eig + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: eigenvalues + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: eigenvectors + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: eigenvalues + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: eigenvectors + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: eigenvalues + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: eigenvectors + name: eigenvectors + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_eigvals + operator_name: linalg_eigvals + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eigvals(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_eigvals_out + operator_name: linalg_eigvals + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_eigh + operator_name: linalg_eigh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: eigenvalues + type: at::Tensor + - dynamic_type: at::Tensor + field_name: eigenvectors + name: eigenvectors + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_eigh_out + operator_name: linalg_eigh + overload_name: eigvals + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: eigvals + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: eigvecs + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::string_view, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: eigenvalues + is_nullable: false + name: eigvals + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: eigenvectors + is_nullable: false + name: eigvecs + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: eigenvalues + name: eigvals + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: eigenvectors + name: eigvecs + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_eigvalsh + operator_name: linalg_eigvalsh + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_eigvalsh_out + operator_name: linalg_eigvalsh + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_eigvalsh.out(Tensor self, str UPLO='L', *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"L"' + dynamic_type: c10::string_view + is_nullable: false + name: UPLO + type: c10::string_view + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_householder_product + operator_name: linalg_householder_product + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tau + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tau + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_householder_product_out + operator_name: linalg_householder_product + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tau + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tau + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _linalg_inv_out_helper_ + operator_name: _linalg_inv_out_helper_ + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_linalg_inv_out_helper_(Tensor(a!) self, Tensor(b!) infos_lu, Tensor(c!) infos_getri) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: infos_lu + type: at::Tensor & + - annotation: c! 
+ dynamic_type: at::Tensor + is_nullable: false + name: infos_getri + type: at::Tensor & + schema_order_cpp_signature: at::Tensor & (at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: self + type: at::Tensor & + - annotation: b! + dynamic_type: at::Tensor + is_nullable: false + name: infos_lu + type: at::Tensor & + - annotation: c! + dynamic_type: at::Tensor + is_nullable: false + name: infos_getri + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: self + type: at::Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_inv_ex + operator_name: linalg_inv_ex + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_inv_ex(Tensor self, *, bool check_errors=False) -> (Tensor inverse, Tensor info) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: inverse + name: inverse + type: at::Tensor + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_inv_ex_out + operator_name: linalg_inv_ex + overload_name: inverse + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_inv_ex.inverse(Tensor self, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: inverse + is_nullable: false + name: inverse + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: info + is_nullable: false + name: info + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: check_errors + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: inverse + is_nullable: false + name: inverse + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: info + is_nullable: false + name: info + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: inverse + name: inverse + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: info + name: info + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_inv + operator_name: linalg_inv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_inv(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_inv_out + operator_name: linalg_inv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_inv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: inner + operator_name: inner + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::inner(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: inner_out + operator_name: inner + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: outer + operator_name: outer + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::outer(Tensor self, Tensor vec2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: outer_out + operator_name: outer + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ger + operator_name: ger + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::ger(Tensor self, Tensor vec2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: ger_out + operator_name: ger + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: vec2 + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_norm + operator_name: linalg_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: ord + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, c10::optional, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: ord + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_norm + operator_name: linalg_norm + overload_name: ord_str + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, c10::optional, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_norm_out + operator_name: linalg_norm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: ord + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, c10::optional, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: ord + type: const c10::optional & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_norm_out + operator_name: linalg_norm + overload_name: ord_str_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, c10::optional, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_vector_norm + operator_name: linalg_vector_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_vector_norm_out + operator_name: linalg_vector_norm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_matrix_norm + operator_name: linalg_matrix_norm + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_norm_out + operator_name: linalg_matrix_norm + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Scalar &, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: const at::Scalar & + is_nullable: false + name: ord + type: const at::Scalar & + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_norm + operator_name: linalg_matrix_norm + overload_name: str_ord + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"fro"' + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"fro"' + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_norm_out + operator_name: linalg_matrix_norm + overload_name: str_ord_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"fro"' + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::IntArrayRef, bool, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"fro"' + dynamic_type: c10::string_view + is_nullable: false + name: ord + type: c10::string_view + - annotation: null + default: '{-2,-1}' + dynamic_type: at::IntArrayRef + is_nullable: false + name: dim + type: at::IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: at::ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _linalg_svd + operator_name: _linalg_svd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor Vh) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor + - dynamic_type: at::Tensor + field_name: S + name: S + type: at::Tensor + - dynamic_type: at::Tensor + field_name: Vh + name: Vh + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _linalg_svd_out + operator_name: _linalg_svd + overload_name: U + manual_kernel_registration: false + 
category_override: '' + schema_string: aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: S + is_nullable: false + name: S + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: Vh + is_nullable: false + name: Vh + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: S + is_nullable: false + name: S + output: true + type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + field_name: Vh + is_nullable: false + name: Vh + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: S + name: S + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: Vh + name: Vh + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_svd + operator_name: linalg_svd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_svd(Tensor A, bool full_matrices=True) -> (Tensor U, Tensor S, Tensor Vh) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor + - dynamic_type: at::Tensor + field_name: S + name: S + type: at::Tensor + - dynamic_type: at::Tensor + field_name: Vh + name: Vh + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_svd_out + operator_name: linalg_svd + overload_name: U + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: S + is_nullable: false + name: S + output: true + type: at::Tensor & + - allocate: true + annotation: c! + dynamic_type: at::Tensor + field_name: Vh + is_nullable: false + name: Vh + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: full_matrices + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: U + is_nullable: false + name: U + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: S + is_nullable: false + name: S + output: true + type: at::Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: at::Tensor + field_name: Vh + is_nullable: false + name: Vh + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: U + name: U + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: S + name: S + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: Vh + name: Vh + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_svdvals + operator_name: linalg_svdvals + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_svdvals(Tensor A) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_svdvals_out + operator_name: linalg_svdvals + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_svdvals.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: A + type: const at::Tensor & + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cond + operator_name: linalg_cond + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cond(Tensor self, Scalar? 
p=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: p + type: const c10::optional & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: p + type: const c10::optional & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cond_out + operator_name: linalg_cond + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: p + type: const c10::optional & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + name: p + type: const c10::optional & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cond + operator_name: linalg_cond + overload_name: p_str + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cond.p_str(Tensor self, str p) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: p + type: c10::string_view + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: p + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_cond_out + operator_name: linalg_cond + overload_name: p_str_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: p + type: c10::string_view + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: p + type: c10::string_view + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_pinv + operator_name: linalg_pinv + overload_name: atol_rtol_tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? 
rtol=None, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_pinv_out + operator_name: linalg_pinv + overload_name: atol_rtol_tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_pinv + operator_name: linalg_pinv + overload_name: atol_rtol_float + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_pinv_out + operator_name: linalg_pinv + overload_name: atol_rtol_float_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_pinv + operator_name: linalg_pinv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: rcond + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: rcond + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_pinv + operator_name: linalg_pinv + overload_name: rcond_tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: rcond + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: 
bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: rcond + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_pinv_out + operator_name: linalg_pinv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: rcond + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: rcond + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_pinv_out + operator_name: linalg_pinv + overload_name: out_rcond_tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: rcond + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: rcond + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_solve + operator_name: linalg_solve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_solve(Tensor input, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_solve_out + operator_name: linalg_solve + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_solve.out(Tensor input, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_tensorinv + operator_name: linalg_tensorinv + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: ind + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: ind + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_tensorinv_out + operator_name: linalg_tensorinv + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: ind + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: ind + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_tensorsolve + operator_name: linalg_tensorsolve + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? 
dims=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dims + type: c10::optional + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dims + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_tensorsolve_out + operator_name: linalg_tensorsolve + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dims + type: c10::optional + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: at::IntArrayRef + is_nullable: true + name: dims + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_qr + operator_name: linalg_qr + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_qr(Tensor self, str mode='reduced') -> (Tensor Q, Tensor R) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"reduced"' + dynamic_type: c10::string_view + is_nullable: false + name: mode + type: c10::string_view + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"reduced"' + dynamic_type: c10::string_view + is_nullable: false + name: mode + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: Q + name: Q + type: at::Tensor + - dynamic_type: at::Tensor + field_name: R + name: R + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_qr_out + operator_name: linalg_qr + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_qr.out(Tensor self, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: Q + is_nullable: false + name: Q + output: true + type: at::Tensor & + - allocate: true + annotation: b! + dynamic_type: at::Tensor + field_name: R + is_nullable: false + name: R + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"reduced"' + dynamic_type: c10::string_view + is_nullable: false + name: mode + type: c10::string_view + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::string_view, at::Tensor &, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: '"reduced"' + dynamic_type: c10::string_view + is_nullable: false + name: mode + type: c10::string_view + - allocate: true + annotation: a! + dynamic_type: at::Tensor + field_name: Q + is_nullable: false + name: Q + output: true + type: at::Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: at::Tensor + field_name: R + is_nullable: false + name: R + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + field_name: Q + name: Q + type: at::Tensor & + - dynamic_type: at::Tensor + field_name: R + name: R + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _linalg_qr_helper + operator_name: _linalg_qr_helper + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_linalg_qr_helper(Tensor self, str mode) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: mode + type: c10::string_view + schema_order_cpp_signature: ::std::tuple (const at::Tensor &, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: mode + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result0 + type: at::Tensor + - dynamic_type: at::Tensor + name: result1 + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_matrix_power + operator_name: linalg_matrix_power + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_power(Tensor self, int n) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_power_out + operator_name: linalg_matrix_power + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, int64_t, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank + operator_name: linalg_matrix_rank + overload_name: atol_rtol_tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const c10::optional &, const c10::optional &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank_out + operator_name: linalg_matrix_rank + overload_name: atol_rtol_tensor_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const c10::optional &, const c10::optional &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: atol + type: const c10::optional & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: rtol + type: const c10::optional & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank + operator_name: linalg_matrix_rank + overload_name: atol_rtol_float + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? 
rtol=None, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank_out + operator_name: linalg_matrix_rank + overload_name: atol_rtol_float_out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, c10::optional, c10::optional, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: atol + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + kwarg_only: true + name: rtol + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: hermitian + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank + operator_name: linalg_matrix_rank + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank_out + operator_name: linalg_matrix_rank + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, double, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank + operator_name: linalg_matrix_rank + overload_name: tol_tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tol + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tol + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_matrix_rank_out + operator_name: linalg_matrix_rank + overload_name: out_tol_tensor + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tol + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + schema_order_cpp_signature: at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: input + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: tol + type: const at::Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: hermitian + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_multi_dot + operator_name: linalg_multi_dot + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_multi_dot(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: linalg_multi_dot_out + operator_name: linalg_multi_dot + overload_name: out + manual_kernel_registration: false + category_override: '' + schema_string: aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor & (at::TensorList, at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + - allocate: true + annotation: a! 
+ dynamic_type: at::Tensor + is_nullable: false + name: out + output: true + type: at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: at::Tensor + name: out + type: at::Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _test_serialization_subcmul + operator_name: _test_serialization_subcmul + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: other + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: const at::Scalar & + is_nullable: false + name: alpha + type: const at::Scalar & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _test_optional_intlist + operator_name: _test_optional_intlist + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: addends + type: c10::optional<at::IntArrayRef> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::IntArrayRef>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: addends + type: c10::optional<at::IntArrayRef> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_optional_filled_intlist + operator_name: _test_optional_filled_intlist + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_optional_filled_intlist(Tensor values, int[2]?
addends) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: addends + size: 2 + type: c10::optional<at::IntArrayRef> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<at::IntArrayRef>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::IntArrayRef + is_nullable: true + name: addends + size: 2 + type: c10::optional<at::IntArrayRef> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_optional_floatlist + operator_name: _test_optional_floatlist + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::ArrayRef<double> + is_nullable: true + name: addends + type: c10::optional<c10::ArrayRef<double>> + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::optional<c10::ArrayRef<double>>) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: values + type: const at::Tensor & + - annotation: null + dynamic_type: at::ArrayRef<double> + is_nullable: true + name: addends + type: c10::optional<c10::ArrayRef<double>> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_string_default + operator_name: _test_string_default + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '"\"''\\"' + dynamic_type: c10::string_view + is_nullable: false + name: a + type: c10::string_view + - annotation: null + default: '"\"''\\"' + dynamic_type: c10::string_view + is_nullable: false + name: b + type: c10::string_view + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: '"\"''\\"' + dynamic_type: c10::string_view + is_nullable: false + name: a + type: c10::string_view + - annotation: null + default: '"\"''\\"' + dynamic_type: c10::string_view + is_nullable: false + name: b + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _test_ambiguous_defaults + operator_name: _test_ambiguous_defaults + overload_name: a + manual_kernel_registration: false +
category_override: '' + schema_string: aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: a + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: b + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: a + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: b + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _test_ambiguous_defaults + operator_name: _test_ambiguous_defaults + overload_name: b + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: a + type: int64_t + - annotation: null + default: '"2"' + dynamic_type: c10::string_view + is_nullable: false + name: b + type: c10::string_view + schema_order_cpp_signature: at::Tensor (const at::Tensor &, int64_t, c10::string_view) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: dummy + type: const at::Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: a + type: int64_t + - annotation: null + default: '"2"' + dynamic_type: c10::string_view + is_nullable: false + name: b + type: c10::string_view + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: _test_warn_in_autograd + operator_name: _test_warn_in_autograd + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_test_warn_in_autograd(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + schema_order_cpp_signature: at::Tensor (const at::Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: self + type: const at::Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: segment_reduce + operator_name: segment_reduce + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::segment_reduce(Tensor data, str reduce, *, Tensor? 
lengths=None, Tensor? indices=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: data + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: reduce + type: c10::string_view + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: lengths + type: const c10::optional<at::Tensor> & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: indices + type: const c10::optional<at::Tensor> & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: unsafe + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + kwarg_only: true + name: initial + type: const c10::optional<at::Scalar> & + schema_order_cpp_signature: at::Tensor (const at::Tensor &, c10::string_view, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, bool, const c10::optional<at::Scalar> &) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: data + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: reduce + type: c10::string_view + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: lengths + type: const c10::optional<at::Tensor> & + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: indices + type: const c10::optional<at::Tensor> & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: unsafe + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: const at::Scalar & + is_nullable: true + kwarg_only: true + name: initial + type: const c10::optional<at::Scalar> & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _segment_reduce_backward + operator_name: _segment_reduce_backward + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor?
lengths=None, int axis=0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: data + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: reduce + type: c10::string_view + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: lengths + type: const c10::optional<at::Tensor> & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + schema_order_cpp_signature: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::string_view, const c10::optional<at::Tensor> &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: grad + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: output + type: const at::Tensor & + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: data + type: const at::Tensor & + - annotation: null + dynamic_type: c10::string_view + is_nullable: false + name: reduce + type: c10::string_view + - annotation: null + default: '{}' + dynamic_type: at::Tensor + is_nullable: true + kwarg_only: true + name: lengths + type: const c10::optional<at::Tensor> & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pad_sequence + operator_name: pad_sequence + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor + arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: sequences + type: at::TensorList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: padding_value + type: double + schema_order_cpp_signature: at::Tensor (at::TensorList, bool, double) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: sequences + type: at::TensorList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: padding_value + type: double + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: flatten_dense_tensors + operator_name: flatten_dense_tensors + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor + arguments: + - annotation: null +
dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: at::Tensor (at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::Tensor + name: result + type: at::Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: unflatten_dense_tensors + operator_name: unflatten_dense_tensors + overload_name: '' + manual_kernel_registration: false + category_override: '' + schema_string: aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: flat + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + schema_order_cpp_signature: ::std::vector<at::Tensor> (const at::Tensor &, at::TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: at::Tensor + is_nullable: false + name: flat + type: const at::Tensor & + - annotation: null + dynamic_type: at::TensorList + is_nullable: false + name: tensors + type: at::TensorList + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: at::TensorList + name: result + type: ::std::vector<at::Tensor> + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true diff --git a/libtch/c-generated.go b/libtch/c-generated.go index 4f8748d..cacba30 100644 --- a/libtch/c-generated.go +++ b/libtch/c-generated.go @@ -118,8 +118,17 @@ cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor)) cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval)) C.atg__amp_update_scale_(ptr, self, growthTracker, foundInf, cscaleGrowthFactor, cscaleBackoffFactor, cgrowthInterval) } -func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ - C.atg__baddbmm_mkl_(ptr, self, batch1, batch2) +func Atg_AutocastToFullPrecision(ptr *Ctensor, self Ctensor, cudaEnabled int32, cpuEnabled int32){ +ccudaEnabled := *(*C.int)(unsafe.Pointer(&cudaEnabled)) +ccpuEnabled := *(*C.int)(unsafe.Pointer(&cpuEnabled)) + C.atg__autocast_to_full_precision(ptr, self, ccudaEnabled, ccpuEnabled) +} +func Atg_AutocastToReducedPrecision(ptr *Ctensor, self Ctensor, cudaEnabled int32, cpuEnabled int32, cudaDtype int32, cpuDtype int32){ +ccudaEnabled := *(*C.int)(unsafe.Pointer(&cudaEnabled)) +ccpuEnabled := *(*C.int)(unsafe.Pointer(&cpuEnabled)) +ccudaDtype := *(*C.int)(unsafe.Pointer(&cudaDtype)) +ccpuDtype := *(*C.int)(unsafe.Pointer(&cpuDtype)) + C.atg__autocast_to_reduced_precision(ptr, self, ccudaEnabled, ccpuEnabled, ccudaDtype, ccpuDtype) } func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32){ cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) @@ -203,17 +212,6 @@ cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) C.atg__conv_depthwise2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) } -func Atg_ConvDepthwise2dBackward(ptr *Ctensor, gradInput Ctensor, gradWeight Ctensor, gradOutput Ctensor, self Ctensor, weight
Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg__conv_depthwise2d_backward(ptr, gradInput, gradWeight, gradOutput, self, weight, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) -} func Atg_ConvDepthwise2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) @@ -235,6 +233,16 @@ csize := *(*C.int64_t)(unsafe.Pointer(&size)) coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) C.atg__convert_indices_from_coo_to_csr_out(ptr, out, self, csize, coutInt32) } +func Atg_ConvertIndicesFromCsrToCoo(ptr *Ctensor, crowIndices Ctensor, colIndices Ctensor, outInt32 int32, transpose int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + C.atg__convert_indices_from_csr_to_coo(ptr, crowIndices, colIndices, coutInt32, ctranspose) +} +func Atg_ConvertIndicesFromCsrToCooOut(ptr *Ctensor, out Ctensor, crowIndices Ctensor, colIndices Ctensor, outInt32 int32, transpose int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + C.atg__convert_indices_from_csr_to_coo_out(ptr, out, crowIndices, colIndices, coutInt32, ctranspose) +} func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32, allowTf32 int32){ cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) @@ -279,18 +287,6 @@ cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) C.atg__convolution_mode(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpadding, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) } -func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -ctransposed := 
*(*C.int)(unsafe.Pointer(&transposed)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen) -} func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32){ cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) C.atg__copy_from(ptr, self, dst, cnonBlocking) @@ -398,6 +394,13 @@ func Atg_Dimv(self Ctensor) int64{ func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor){ C.atg__dirichlet_grad(ptr, x, alpha, total) } +func Atg_Efficientzerotensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg__efficientzerotensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32, paddingIdx int64){ cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) @@ -573,6 +576,18 @@ func Atg_HasCompatibleShallowCopyType(self Ctensor, from Ctensor) bool{ if cbool == 1{return true} return false } +func Atg_HasSameStorageNumel(self Ctensor, other Ctensor) bool{ + cResult := C.atg__has_same_storage_numel(self, other) + cbool := *(*int)(unsafe.Pointer(&cResult)) + if cbool == 1{return true} + return false +} +func Atg_HistogramddFromBinTensors(ptr *Ctensor, self Ctensor, binsData []Ctensor, binsLen int, weight Ctensor, density int32){ +cbinsDataPtr := (*Ctensor)(unsafe.Pointer(&binsData[0])) +cbinsLen := *(*C.int)(unsafe.Pointer(&binsLen)) +cdensity := *(*C.int)(unsafe.Pointer(&density)) + C.atg__histogramdd_from_bin_tensors(ptr, self, cbinsDataPtr, cbinsLen, weight, cdensity) +} func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) C.atg__index_copy_(ptr, self, cdim, index, source) @@ -587,8 +602,11 @@ cunsafety := *(*C.int)(unsafe.Pointer(&unsafety)) func Atg_Indices(ptr *Ctensor, self Ctensor){ C.atg__indices(ptr, self) } -func Atg_InverseHelper(ptr *Ctensor, self Ctensor){ - C.atg__inverse_helper(ptr, self) +func Atg_IsZerotensor(self Ctensor) bool{ + cResult := C.atg__is_zerotensor(self) + cbool := *(*int)(unsafe.Pointer(&cResult)) + if cbool == 1{return true} + return false } func Atg_LinalgInvOutHelper_(ptr *Ctensor, self Ctensor, infosLu Ctensor, infosGetri Ctensor){ C.atg__linalg_inv_out_helper_(ptr, self, infosLu, infosGetri) @@ -599,18 +617,30 @@ modeLen := len(mode) cmodeLen := *(*C.int)(unsafe.Pointer(&modeLen)) C.atg__linalg_qr_helper(ptr, self, cmode, cmodeLen) } +func Atg_LinalgSvd(ptr *Ctensor, a Ctensor, fullMatrices int32, computeUv int32){ +cfullMatrices := *(*C.int)(unsafe.Pointer(&fullMatrices)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) + C.atg__linalg_svd(ptr, a, cfullMatrices, ccomputeUv) +} +func Atg_LinalgSvdU(ptr *Ctensor, u Ctensor, s Ctensor, vh Ctensor, a Ctensor, fullMatrices int32, computeUv int32){ +cfullMatrices := *(*C.int)(unsafe.Pointer(&fullMatrices)) 
+ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) + C.atg__linalg_svd_u(ptr, u, s, vh, a, cfullMatrices, ccomputeUv) +} func Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) C.atg__log_softmax(ptr, self, cdim, chalfToFloat) } -func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, self) +func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, inputDtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cinputDtype := *(*C.int)(unsafe.Pointer(&inputDtype)) + C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, cinputDtype) } -func Atg_LogSoftmaxBackwardDataOut(ptr *Ctensor, out Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__log_softmax_backward_data_out(ptr, out, gradOutput, output, cdim, self) +func Atg_LogSoftmaxBackwardDataOut(ptr *Ctensor, out Ctensor, gradOutput Ctensor, output Ctensor, dim int64, inputDtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cinputDtype := *(*C.int)(unsafe.Pointer(&inputDtype)) + C.atg__log_softmax_backward_data_out(ptr, out, gradOutput, output, cdim, cinputDtype) } func Atg_LogSoftmaxOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, halfToFloat int32){ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) @@ -647,6 +677,9 @@ func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64){ cscale := *(*C.double)(unsafe.Pointer(&scale)) C.atg__masked_scale(ptr, self, mask, cscale) } +func Atg_MaskedSoftmax(ptr *Ctensor, self Ctensor, mask Ctensor){ + C.atg__masked_softmax(ptr, self, mask) +} func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){ cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) @@ -662,9 +695,16 @@ cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1) } +func Atg_NativeMultiHeadSelfAttention(ptr *Ctensor, query Ctensor, qkvWeight Ctensor, qkvBias Ctensor, projWeight Ctensor, projBias Ctensor, mask Ctensor){ + C.atg__native_multi_head_self_attention(ptr, query, qkvWeight, qkvBias, projWeight, projBias, mask) +} func Atg_NegView(ptr *Ctensor, self Ctensor){ C.atg__neg_view(ptr, self) } +func Atg_NewZerosWithSameFeatureMeta(ptr *Ctensor, self Ctensor, other Ctensor, selfNumBatchDims int64){ +cselfNumBatchDims := *(*C.int64_t)(unsafe.Pointer(&selfNumBatchDims)) + C.atg__new_zeros_with_same_feature_meta(ptr, self, other, cselfNumBatchDims) +} func Atg_NnpackAvailable() bool{ cResult := C.atg__nnpack_available() cbool := *(*int)(unsafe.Pointer(&cResult)) @@ -678,18 +718,6 @@ cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) } -func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg__nnpack_spatial_convolution_backward_input(ptr, input, 
gradOutput, weight, cpaddingDataPtr, cpaddingLen) -} -func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int){ -cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0])) -cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen) -} func Atg_Nnz(self Ctensor) int64{ cResult := C.atg__nnz(self) return *(*int64)(unsafe.Pointer(&cResult)) @@ -756,6 +784,15 @@ caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor){ C.atg__shape_as_tensor(ptr, self) } +func Atg_SlowConv2dBackward(ptr *Ctensor, gradInput Ctensor, gradWeight Ctensor, gradBias Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg__slow_conv2d_backward(ptr, gradInput, gradWeight, gradBias, gradOutput, self, weight, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32){ cn := *(*C.int64_t)(unsafe.Pointer(&n)) cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) @@ -782,13 +819,15 @@ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) C.atg__softmax(ptr, self, cdim, chalfToFloat) } -func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self) +func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, inputDtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cinputDtype := *(*C.int)(unsafe.Pointer(&inputDtype)) + C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, cinputDtype) } -func Atg_SoftmaxBackwardDataOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__softmax_backward_data_out(ptr, gradInput, gradOutput, output, cdim, self) +func Atg_SoftmaxBackwardDataOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor, dim int64, inputDtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cinputDtype := *(*C.int)(unsafe.Pointer(&inputDtype)) + C.atg__softmax_backward_data_out(ptr, gradInput, gradOutput, output, cdim, cinputDtype) } func Atg_SoftmaxOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, halfToFloat int32){ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) @@ -801,6 +840,11 @@ func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor){ func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor){ C.atg__sparse_addmm(ptr, self, sparse, dense) } 
+func Atg_SparseBroadcastTo(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg__sparse_broadcast_to(ptr, self, csizeDataPtr, csizeLen) +} func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) @@ -911,11 +955,6 @@ func Atg_StandardGamma(ptr *Ctensor, self Ctensor){ func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor){ C.atg__standard_gamma_grad(ptr, self, output) } -func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) - C.atg__svd_helper(ptr, self, csome, ccomputeUv) -} func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) cupper := *(*C.int)(unsafe.Pointer(&upper)) @@ -955,6 +994,9 @@ bLen := len(b) cbLen := *(*C.int)(unsafe.Pointer(&bLen)) C.atg__test_string_default(ptr, dummy, ca, caLen, cb, cbLen) } +func Atg_TestWarnInAutograd(ptr *Ctensor, self Ctensor){ + C.atg__test_warn_in_autograd(ptr, self) +} func Atg_ToCopy(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32){ coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) @@ -962,6 +1004,9 @@ cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) C.atg__to_copy(ptr, self, coptionsKind, coptionsDevice, cnonBlocking) } +func Atg_TorchCudaCuLinkerSymbolOp(ptr *Ctensor, self Ctensor){ + C.atg__torch_cuda_cu_linker_symbol_op(ptr, self) +} func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64){ cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0])) cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len)) @@ -994,6 +1039,214 @@ csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen) } +func Atg_UpsampleBicubic2dAa(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bicubic2d_aa(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBicubic2dAaBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := 
*(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bicubic2d_aa_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBicubic2dAaBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bicubic2d_aa_backward_grad_input(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBicubic2dAaOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bicubic2d_aa_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBilinear2dAa(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bilinear2d_aa(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBilinear2dAaBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull 
int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bilinear2d_aa_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBilinear2dAaBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bilinear2d_aa_backward_grad_input(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleBilinear2dAaOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_bilinear2d_aa_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) + C.atg__upsample_nearest_exact1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesVal, cscalesNull) +} +func Atg_UpsampleNearestExact1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := 
*(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) + C.atg__upsample_nearest_exact1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesVal, cscalesNull) +} +func Atg_UpsampleNearestExact1dBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) + C.atg__upsample_nearest_exact1d_backward_grad_input(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesVal, cscalesNull) +} +func Atg_UpsampleNearestExact1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) + C.atg__upsample_nearest_exact1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesVal, cscalesNull) +} +func Atg_UpsampleNearestExact2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact2dBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact2d_backward_grad_input(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact3dBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, 
inputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact3d_backward_grad_input(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func Atg_UpsampleNearestExact3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) + C.atg__upsample_nearest_exact3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} func Atg_UseCudnnCtcLoss(logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64) bool{ cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) @@ -1204,6 +1457,9 @@ func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ C.atg_addr_out(ptr, out, self, vec1, vec2) } +func AtgAdjoint(ptr *Ctensor, self Ctensor){ + C.atg_adjoint(ptr, self) +} func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) @@ -1376,6 +1632,15 @@ func AtgArcsinhOut(ptr *Ctensor, out Ctensor, self Ctensor){ func AtgArctan(ptr *Ctensor, self Ctensor){ C.atg_arctan(ptr, self) } +func AtgArctan2(ptr *Ctensor, self Ctensor, other Ctensor){ + C.atg_arctan2(ptr, self, other) +} +func AtgArctan2_(ptr *Ctensor, self Ctensor, other Ctensor){ + C.atg_arctan2_(ptr, self, other) +} +func AtgArctan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ + C.atg_arctan2_out(ptr, out, self, other) +} func AtgArctan_(ptr *Ctensor, self Ctensor){ C.atg_arctan_(ptr, self) } @@ -1420,6 +1685,9 @@ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) cdescending := *(*C.int)(unsafe.Pointer(&descending)) C.atg_argsort(ptr, self, cdim, cdescending) } +func AtgArgwhere(ptr *Ctensor, self Ctensor){ + C.atg_argwhere(ptr, self) +} func AtgAsStrided(ptr *Ctensor, self Ctensor, 
sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffsetVal int64, storageOffsetNull int){ csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) @@ -2226,17 +2494,6 @@ cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) C.atg_conv_depthwise3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) } -func AtgConvDepthwise3dBackward(ptr *Ctensor, gradInput Ctensor, gradWeight Ctensor, gradBias Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_conv_depthwise3d_backward(ptr, gradInput, gradWeight, gradBias, gradOutput, self, weight, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) -} func AtgConvTbc(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64){ cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) C.atg_conv_tbc(ptr, self, weight, bias, cpad) @@ -2456,60 +2713,6 @@ cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) C.atg_cudnn_convolution_add_relu(ptr, self, weight, z, alpha , bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) } -func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) - C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) -} -func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups 
int64, benchmark int32, deterministic int32, allowTf32 int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) - C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) -} -func AtgCudnnConvolutionDeprecated(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_deprecated(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionDeprecated2(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_deprecated2(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} func AtgCudnnConvolutionRelu(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) @@ -2535,62 +2738,6 @@ cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, 
cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) } -func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) - C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) -} -func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) - C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) -} -func AtgCudnnConvolutionTransposeDeprecated(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_transpose_deprecated(ptr, self, weight, bias, cpaddingDataPtr, 
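// The convolution backward bindings deleted above (conv_depthwise3d_backward,
// the cudnn_convolution backward/deprecated family, and their transpose
// variants) disappear because PyTorch 1.11 drops those entries from
// Declarations-v1.11.0.yaml: upstream consolidated the per-backend backward
// ops into a single convolution_backward. That a consolidated Atg entry point
// exists elsewhere in the regenerated file is an assumption, not shown in
// this hunk; callers pinned to the removed names have to migrate rather than
// regenerate. The miopen and mkldnn backward removals further down in this
// diff follow the same pattern.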
cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTransposeDeprecated2(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_transpose_deprecated2(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor){ C.atg_cudnn_grid_sampler(ptr, self, grid) } @@ -2732,6 +2879,12 @@ cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) C.atg_diagonal_backward(ptr, gradOutput, cinputSizesDataPtr, cinputSizesLen, coffset, cdim1, cdim2) } +func AtgDiagonalScatter(ptr *Ctensor, self Ctensor, src Ctensor, offset int64, dim1 int64, dim2 int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) + C.atg_diagonal_scatter(ptr, self, src, coffset, cdim1, cdim2) +} func AtgDiff(ptr *Ctensor, self Ctensor, n int64, dim int64, prepend Ctensor, append Ctensor){ cn := *(*C.int64_t)(unsafe.Pointer(&n)) cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) @@ -3259,6 +3412,26 @@ normLen := len(norm) cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) C.atg_fft_hfft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) } +func AtgFftHfft2(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_hfft2(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftHfft2Out(ptr *Ctensor, out Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_hfft2_out(ptr, out, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} func AtgFftHfftOut(ptr *Ctensor, out Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) cnNull := 
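// These fft_hfft2/fft_hfftn additions marshal the Go string norm with
// C.CString, which copies it into C-allocated memory; the length is
// reinterpreted separately. A usage sketch ("ortho" is one of the norm
// values ATen accepts):
//
//	s := []int64{8, 8}
//	dims := []int64{-2, -1}
//	AtgFftHfft2(ptr, self, s, len(s), dims, len(dims), "ortho")
//
// Nothing in this hunk frees the C.CString buffer, so a long-running caller
// may want the hand-written layer above to wrap and free it (an observation
// about the generated code as shown, not a documented contract).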
*(*C.uint8_t)(unsafe.Pointer(&nNull)) @@ -3268,6 +3441,26 @@ normLen := len(norm) cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) C.atg_fft_hfft_out(ptr, out, self, cnVal, cnNull, cdim, cnorm, cnormLen) } +func AtgFftHfftn(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_hfftn(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftHfftnOut(ptr *Ctensor, out Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_hfftn_out(ptr, out, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} func AtgFftIfft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) @@ -3340,6 +3533,26 @@ normLen := len(norm) cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) C.atg_fft_ihfft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) } +func AtgFftIhfft2(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_ihfft2(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftIhfft2Out(ptr *Ctensor, out Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_ihfft2_out(ptr, out, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} func AtgFftIhfftOut(ptr *Ctensor, out Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) @@ -3349,6 +3562,26 @@ normLen := len(norm) cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) C.atg_fft_ihfft_out(ptr, out, self, cnVal, cnNull, cdim, cnorm, cnormLen) } +func AtgFftIhfftn(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_ihfftn(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftIhfftnOut(ptr *Ctensor, out Ctensor, self Ctensor, 
sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) + C.atg_fft_ihfftn_out(ptr, out, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} func AtgFftIrfft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) @@ -3853,12 +4086,6 @@ cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) } -func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ -cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) -cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) -} func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) @@ -4176,13 +4403,9 @@ func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source C cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) C.atg_index_add_(ptr, self, cdim, index, source) } -func AtgIndexAddAlpha(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor, alpha Cscalar){ +func AtgIndexAddOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_add_alpha(ptr, self, cdim, index, source, alpha ) -} -func AtgIndexAddAlpha_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor, alpha Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_add_alpha_(ptr, self, cdim, index, source, alpha ) + C.atg_index_add_out(ptr, out, self, cdim, index, source) } func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) @@ -4645,12 +4868,26 @@ pLen := len(p) cpLen := *(*C.int)(unsafe.Pointer(&pLen)) C.atg_linalg_cond_p_str_out(ptr, out, self, cp, cpLen) } +func AtgLinalgCross(ptr *Ctensor, self Ctensor, other Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_linalg_cross(ptr, self, other, cdim) +} +func AtgLinalgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_linalg_cross_out(ptr, out, self, other, cdim) +} func AtgLinalgDet(ptr *Ctensor, self Ctensor){ C.atg_linalg_det(ptr, self) } func AtgLinalgDetOut(ptr *Ctensor, out Ctensor, self Ctensor){ C.atg_linalg_det_out(ptr, out, self) } +func AtgLinalgDiagonal(ptr *Ctensor, a Ctensor, offset int64, dim1 int64, dim2 int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) + C.atg_linalg_diagonal(ptr, a, coffset, cdim1, cdim2) +} func AtgLinalgEig(ptr *Ctensor, 
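// linalg_cross arrives with an explicit dim argument. A quick usage sketch,
// with dim = -1 matching torch.linalg.cross's default of the last dimension:
//
//	AtgLinalgCross(ptr, self, other, -1)
//	AtgLinalgCrossOut(ptr, out, self, other, -1)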
self Ctensor){ C.atg_linalg_eig(ptr, self) } @@ -4723,12 +4960,33 @@ driverLen := len(driver) cdriverLen := *(*C.int)(unsafe.Pointer(&driverLen)) C.atg_linalg_lstsq_out(ptr, solution, residuals, rank, singularValues, self, b, crcondVal, crcondNull, cdriver, cdriverLen) } +func AtgLinalgLuFactor(ptr *Ctensor, a Ctensor, pivot int32){ +cpivot := *(*C.int)(unsafe.Pointer(&pivot)) + C.atg_linalg_lu_factor(ptr, a, cpivot) +} +func AtgLinalgLuFactorEx(ptr *Ctensor, a Ctensor, pivot int32, checkErrors int32){ +cpivot := *(*C.int)(unsafe.Pointer(&pivot)) +ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) + C.atg_linalg_lu_factor_ex(ptr, a, cpivot, ccheckErrors) +} +func AtgLinalgLuFactorExOut(ptr *Ctensor, lU Ctensor, pivots Ctensor, info Ctensor, a Ctensor, pivot int32, checkErrors int32){ +cpivot := *(*C.int)(unsafe.Pointer(&pivot)) +ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) + C.atg_linalg_lu_factor_ex_out(ptr, lU, pivots, info, a, cpivot, ccheckErrors) +} +func AtgLinalgLuFactorOut(ptr *Ctensor, lU Ctensor, pivots Ctensor, a Ctensor, pivot int32){ +cpivot := *(*C.int)(unsafe.Pointer(&pivot)) + C.atg_linalg_lu_factor_out(ptr, lU, pivots, a, cpivot) +} func AtgLinalgMatmul(ptr *Ctensor, self Ctensor, other Ctensor){ C.atg_linalg_matmul(ptr, self, other) } func AtgLinalgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ C.atg_linalg_matmul_out(ptr, out, self, other) } +func AtgLinalgMatrixExp(ptr *Ctensor, self Ctensor){ + C.atg_linalg_matrix_exp(ptr, self) +} func AtgLinalgMatrixPower(ptr *Ctensor, self Ctensor, n int64){ cn := *(*C.int64_t)(unsafe.Pointer(&n)) C.atg_linalg_matrix_power(ptr, self, cn) @@ -4737,17 +4995,39 @@ func AtgLinalgMatrixPowerOut(ptr *Ctensor, out Ctensor, self Ctensor, n int64){ cn := *(*C.int64_t)(unsafe.Pointer(&n)) C.atg_linalg_matrix_power_out(ptr, out, self, cn) } -func AtgLinalgMatrixRank(ptr *Ctensor, self Ctensor, tolVal float64, tolNull int, hermitian int32){ -ctolVal := *(*C.double)(unsafe.Pointer(&tolVal)) -ctolNull := *(*C.uint8_t)(unsafe.Pointer(&tolNull)) +func AtgLinalgMatrixRank(ptr *Ctensor, self Ctensor, tol float64, hermitian int32){ +ctol := *(*C.double)(unsafe.Pointer(&tol)) chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) - C.atg_linalg_matrix_rank(ptr, self, ctolVal, ctolNull, chermitian) + C.atg_linalg_matrix_rank(ptr, self, ctol, chermitian) } -func AtgLinalgMatrixRankOut(ptr *Ctensor, out Ctensor, self Ctensor, tolVal float64, tolNull int, hermitian int32){ -ctolVal := *(*C.double)(unsafe.Pointer(&tolVal)) -ctolNull := *(*C.uint8_t)(unsafe.Pointer(&tolNull)) +func AtgLinalgMatrixRankAtolRtolFloat(ptr *Ctensor, self Ctensor, atolVal float64, atolNull int, rtolVal float64, rtolNull int, hermitian int32){ +catolVal := *(*C.double)(unsafe.Pointer(&atolVal)) +catolNull := *(*C.uint8_t)(unsafe.Pointer(&atolNull)) +crtolVal := *(*C.double)(unsafe.Pointer(&rtolVal)) +crtolNull := *(*C.uint8_t)(unsafe.Pointer(&rtolNull)) chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) - C.atg_linalg_matrix_rank_out(ptr, out, self, ctolVal, ctolNull, chermitian) + C.atg_linalg_matrix_rank_atol_rtol_float(ptr, self, catolVal, catolNull, crtolVal, crtolNull, chermitian) +} +func AtgLinalgMatrixRankAtolRtolFloatOut(ptr *Ctensor, out Ctensor, self Ctensor, atolVal float64, atolNull int, rtolVal float64, rtolNull int, hermitian int32){ +catolVal := *(*C.double)(unsafe.Pointer(&atolVal)) +catolNull := *(*C.uint8_t)(unsafe.Pointer(&atolNull)) +crtolVal := *(*C.double)(unsafe.Pointer(&rtolVal)) +crtolNull := 
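// linalg_lu_factor_ex returns three tensors (LU, pivots, info) through the
// single ptr argument; by this file's convention the C side fills consecutive
// out__[0..2] slots. A hedged caller sketch, assuming the slots form a
// contiguous Ctensor array:
//
//	out := make([]Ctensor, 3)
//	AtgLinalgLuFactorEx(&out[0], a, 1 /*pivot*/, 1 /*checkErrors*/)
//	lu, pivots, info := out[0], out[1], out[2]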
*(*C.uint8_t)(unsafe.Pointer(&rtolNull)) +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_matrix_rank_atol_rtol_float_out(ptr, out, self, catolVal, catolNull, crtolVal, crtolNull, chermitian) +} +func AtgLinalgMatrixRankAtolRtolTensor(ptr *Ctensor, input Ctensor, atol Ctensor, rtol Ctensor, hermitian int32){ +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_matrix_rank_atol_rtol_tensor(ptr, input, atol, rtol, chermitian) +} +func AtgLinalgMatrixRankAtolRtolTensorOut(ptr *Ctensor, out Ctensor, input Ctensor, atol Ctensor, rtol Ctensor, hermitian int32){ +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_matrix_rank_atol_rtol_tensor_out(ptr, out, input, atol, rtol, chermitian) +} +func AtgLinalgMatrixRankOut(ptr *Ctensor, out Ctensor, self Ctensor, tol float64, hermitian int32){ +ctol := *(*C.double)(unsafe.Pointer(&tol)) +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_matrix_rank_out(ptr, out, self, ctol, chermitian) } func AtgLinalgMatrixRankOutTolTensor(ptr *Ctensor, out Ctensor, input Ctensor, tol Ctensor, hermitian int32){ chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) @@ -4806,6 +5086,30 @@ crcond := *(*C.double)(unsafe.Pointer(&rcond)) chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) C.atg_linalg_pinv(ptr, self, crcond, chermitian) } +func AtgLinalgPinvAtolRtolFloat(ptr *Ctensor, self Ctensor, atolVal float64, atolNull int, rtolVal float64, rtolNull int, hermitian int32){ +catolVal := *(*C.double)(unsafe.Pointer(&atolVal)) +catolNull := *(*C.uint8_t)(unsafe.Pointer(&atolNull)) +crtolVal := *(*C.double)(unsafe.Pointer(&rtolVal)) +crtolNull := *(*C.uint8_t)(unsafe.Pointer(&rtolNull)) +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_pinv_atol_rtol_float(ptr, self, catolVal, catolNull, crtolVal, crtolNull, chermitian) +} +func AtgLinalgPinvAtolRtolFloatOut(ptr *Ctensor, out Ctensor, self Ctensor, atolVal float64, atolNull int, rtolVal float64, rtolNull int, hermitian int32){ +catolVal := *(*C.double)(unsafe.Pointer(&atolVal)) +catolNull := *(*C.uint8_t)(unsafe.Pointer(&atolNull)) +crtolVal := *(*C.double)(unsafe.Pointer(&rtolVal)) +crtolNull := *(*C.uint8_t)(unsafe.Pointer(&rtolNull)) +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_pinv_atol_rtol_float_out(ptr, out, self, catolVal, catolNull, crtolVal, crtolNull, chermitian) +} +func AtgLinalgPinvAtolRtolTensor(ptr *Ctensor, self Ctensor, atol Ctensor, rtol Ctensor, hermitian int32){ +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_pinv_atol_rtol_tensor(ptr, self, atol, rtol, chermitian) +} +func AtgLinalgPinvAtolRtolTensorOut(ptr *Ctensor, out Ctensor, self Ctensor, atol Ctensor, rtol Ctensor, hermitian int32){ +chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) + C.atg_linalg_pinv_atol_rtol_tensor_out(ptr, out, self, atol, rtol, chermitian) +} func AtgLinalgPinvOut(ptr *Ctensor, out Ctensor, self Ctensor, rcond float64, hermitian int32){ crcond := *(*C.double)(unsafe.Pointer(&rcond)) chermitian := *(*C.int)(unsafe.Pointer(&hermitian)) @@ -4843,19 +5147,31 @@ func AtgLinalgSolve(ptr *Ctensor, input Ctensor, other Ctensor){ func AtgLinalgSolveOut(ptr *Ctensor, out Ctensor, input Ctensor, other Ctensor){ C.atg_linalg_solve_out(ptr, out, input, other) } -func AtgLinalgSvd(ptr *Ctensor, self Ctensor, fullMatrices int32){ +func AtgLinalgSolveTriangular(ptr *Ctensor, self Ctensor, b Ctensor, upper int32, left int32, unitriangular int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +cleft := 
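// linalg_matrix_rank loses its optional (tolVal, tolNull) pair: tol is now a
// plain double, and the new AtolRtol variants carry the optional pairs
// instead. A migration sketch for a 1.10 caller that passed a null tol:
//
//	// 1.10: AtgLinalgMatrixRank(ptr, self, 0, 1 /*tol unset*/, 0)
//	// 1.11, explicit tolerance:
//	AtgLinalgMatrixRank(ptr, self, 1e-7, 0 /*hermitian*/)
//	// 1.11, both tolerances left unset (null flags = 1):
//	AtgLinalgMatrixRankAtolRtolFloat(ptr, self, 0, 1, 0, 1, 0)
//
// The linalg_pinv additions just below follow the same atol/rtol pattern.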
*(*C.int)(unsafe.Pointer(&left)) +cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) + C.atg_linalg_solve_triangular(ptr, self, b, cupper, cleft, cunitriangular) +} +func AtgLinalgSolveTriangularOut(ptr *Ctensor, out Ctensor, self Ctensor, b Ctensor, upper int32, left int32, unitriangular int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +cleft := *(*C.int)(unsafe.Pointer(&left)) +cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) + C.atg_linalg_solve_triangular_out(ptr, out, self, b, cupper, cleft, cunitriangular) +} +func AtgLinalgSvd(ptr *Ctensor, a Ctensor, fullMatrices int32){ cfullMatrices := *(*C.int)(unsafe.Pointer(&fullMatrices)) - C.atg_linalg_svd(ptr, self, cfullMatrices) + C.atg_linalg_svd(ptr, a, cfullMatrices) } -func AtgLinalgSvdU(ptr *Ctensor, u Ctensor, s Ctensor, vh Ctensor, self Ctensor, fullMatrices int32){ +func AtgLinalgSvdU(ptr *Ctensor, u Ctensor, s Ctensor, vh Ctensor, a Ctensor, fullMatrices int32){ cfullMatrices := *(*C.int)(unsafe.Pointer(&fullMatrices)) - C.atg_linalg_svd_u(ptr, u, s, vh, self, cfullMatrices) + C.atg_linalg_svd_u(ptr, u, s, vh, a, cfullMatrices) } -func AtgLinalgSvdvals(ptr *Ctensor, input Ctensor){ - C.atg_linalg_svdvals(ptr, input) +func AtgLinalgSvdvals(ptr *Ctensor, a Ctensor){ + C.atg_linalg_svdvals(ptr, a) } -func AtgLinalgSvdvalsOut(ptr *Ctensor, out Ctensor, input Ctensor){ - C.atg_linalg_svdvals_out(ptr, out, input) +func AtgLinalgSvdvalsOut(ptr *Ctensor, out Ctensor, a Ctensor){ + C.atg_linalg_svdvals_out(ptr, out, a) } func AtgLinalgTensorinv(ptr *Ctensor, self Ctensor, ind int64){ cind := *(*C.int64_t)(unsafe.Pointer(&ind)) @@ -4881,17 +5197,15 @@ func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ func AtgLinearOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ C.atg_linear_out(ptr, out, input, weight, bias) } -func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int, optionsKind int32, optionsDevice int32){ -cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal)) -cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull)) +func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, optionsKind int32, optionsDevice int32){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_linspace(ptr, start , end , cstepsVal, cstepsNull, coptionsKind, coptionsDevice) + C.atg_linspace(ptr, start , end , csteps, coptionsKind, coptionsDevice) } -func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int){ -cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal)) -cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull)) - C.atg_linspace_out(ptr, out, start , end , cstepsVal, cstepsNull) +func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) + C.atg_linspace_out(ptr, out, start , end , csteps) } func AtgLog(ptr *Ctensor, self Ctensor){ C.atg_log(ptr, self) @@ -5035,19 +5349,17 @@ cepsVal := *(*C.double)(unsafe.Pointer(&epsVal)) cepsNull := *(*C.uint8_t)(unsafe.Pointer(&epsNull)) C.atg_logit_out(ptr, out, self, cepsVal, cepsNull) } -func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int, base float64, optionsKind int32, optionsDevice int32){ -cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal)) -cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull)) +func 
AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, base float64, optionsKind int32, optionsDevice int32){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) cbase := *(*C.double)(unsafe.Pointer(&base)) coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_logspace(ptr, start , end , cstepsVal, cstepsNull, cbase, coptionsKind, coptionsDevice) + C.atg_logspace(ptr, start , end , csteps, cbase, coptionsKind, coptionsDevice) } -func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int, base float64){ -cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal)) -cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull)) +func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64, base float64){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) cbase := *(*C.double)(unsafe.Pointer(&base)) - C.atg_logspace_out(ptr, out, start , end , cstepsVal, cstepsNull, cbase) + C.atg_logspace_out(ptr, out, start , end , csteps, cbase) } func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) @@ -5175,6 +5487,9 @@ func AtgMatrixExp(ptr *Ctensor, self Ctensor){ func AtgMatrixExpBackward(ptr *Ctensor, self Ctensor, grad Ctensor){ C.atg_matrix_exp_backward(ptr, self, grad) } +func AtgMatrixH(ptr *Ctensor, self Ctensor){ + C.atg_matrix_h(ptr, self) +} func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64){ cn := *(*C.int64_t)(unsafe.Pointer(&n)) C.atg_matrix_power(ptr, self, cn) @@ -5450,6 +5765,9 @@ ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) } +func AtgMh(ptr *Ctensor, self Ctensor){ + C.atg_mh(ptr, self) +} func AtgMin(ptr *Ctensor, self Ctensor){ C.atg_min(ptr, self) } @@ -5497,37 +5815,6 @@ cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) } -func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor){ - C.atg_miopen_convolution_backward_bias(ptr, gradOutput) -} -func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput 
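// linspace and logspace drop the (stepsVal, stepsNull) optional pair: in 1.11
// steps is required, since upstream removed the deprecated implicit default
// of 100. A migration sketch:
//
//	// 1.10 allowed AtgLinspace(ptr, start, end, 0, 1, kind, device) for the default;
//	// 1.11 demands the count explicitly:
//	AtgLinspace(ptr, start, end, 100, kind, device)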
Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} func AtgMiopenConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) @@ -5542,32 +5829,6 @@ cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) } -func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) 
-cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) @@ -5580,34 +5841,6 @@ cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) } -func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, 
cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){ cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) @@ -5653,32 +5886,6 @@ cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) } -func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) - C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) -} -func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) - C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) -} func AtgMkldnnLinear(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor){ C.atg_mkldnn_linear(ptr, self, weight, bias) } @@ -5821,6 +6028,9 @@ func AtgMsort(ptr *Ctensor, self Ctensor){ func AtgMsortOut(ptr *Ctensor, out Ctensor, self Ctensor){ C.atg_msort_out(ptr, out, self) } +func AtgMt(ptr *Ctensor, self Ctensor){ + C.atg_mt(ptr, self) +} func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor){ C.atg_mul(ptr, self, other) } @@ -5957,31 +6167,25 @@ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) 
C.atg_nanmedian_dim_values(ptr, values, indices, self, cdim, ckeepdim) } -func AtgNanquantile(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nanquantile(ptr, self, q, cdimVal, cdimNull, ckeepdim) -} -func AtgNanquantileNew(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgNanquantile(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_nanquantile_new(ptr, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) + C.atg_nanquantile(ptr, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } -func AtgNanquantileNewOut(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgNanquantileOut(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_nanquantile_new_out(ptr, out, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) + C.atg_nanquantile_out(ptr, out, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } -func AtgNanquantileNewScalar(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgNanquantileScalar(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ cq := *(*C.double)(unsafe.Pointer(&q)) cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) @@ -5989,9 +6193,9 @@ ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_nanquantile_new_scalar(ptr, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) + C.atg_nanquantile_scalar(ptr, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } -func AtgNanquantileNewScalarOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgNanquantileScalarOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ cq := *(*C.double)(unsafe.Pointer(&q)) cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) @@ -5999,27 +6203,7 @@ ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_nanquantile_new_scalar_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, 
cinterpolationLen) -} -func AtgNanquantileOut(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nanquantile_out(ptr, out, self, q, cdimVal, cdimNull, ckeepdim) -} -func AtgNanquantileScalar(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ -cq := *(*C.double)(unsafe.Pointer(&q)) -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nanquantile_scalar(ptr, self, cq, cdimVal, cdimNull, ckeepdim) -} -func AtgNanquantileScalarOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ -cq := *(*C.double)(unsafe.Pointer(&q)) -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nanquantile_scalar_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim) + C.atg_nanquantile_scalar_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } func AtgNansum(ptr *Ctensor, self Ctensor, dtype int32){ cdtype := *(*C.int)(unsafe.Pointer(&dtype)) @@ -6074,6 +6258,19 @@ cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) ceps := *(*C.double)(unsafe.Pointer(&eps)) C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) } +func AtgNativeChannelShuffle(ptr *Ctensor, self Ctensor, groups int64){ +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_native_channel_shuffle(ptr, self, cgroups) +} +func AtgNativeDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_native_dropout(ptr, input, cp, ctrain) +} +func AtgNativeDropoutBackward(ptr *Ctensor, gradOutput Ctensor, mask Ctensor, scale float64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) + C.atg_native_dropout_backward(ptr, gradOutput, mask, cscale) +} func AtgNativeGroupNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, n int64, c int64, hxW int64, group int64, eps float64){ cn := *(*C.int64_t)(unsafe.Pointer(&n)) cc := *(*C.int64_t)(unsafe.Pointer(&c)) @@ -6528,31 +6725,25 @@ func AtgQrQ(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32){ csome := *(*C.int)(unsafe.Pointer(&some)) C.atg_qr_q(ptr, q, r, self, csome) } -func AtgQuantile(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_quantile(ptr, self, q, cdimVal, cdimNull, ckeepdim) -} -func AtgQuantileNew(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgQuantile(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_quantile_new(ptr, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, 
cinterpolationLen) + C.atg_quantile(ptr, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } -func AtgQuantileNewOut(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgQuantileOut(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32, interpolation string){ cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_quantile_new_out(ptr, out, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) + C.atg_quantile_out(ptr, out, self, q, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } -func AtgQuantileNewScalar(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgQuantileScalar(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ cq := *(*C.double)(unsafe.Pointer(&q)) cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) @@ -6560,9 +6751,9 @@ ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_quantile_new_scalar(ptr, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) + C.atg_quantile_scalar(ptr, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } -func AtgQuantileNewScalarOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ +func AtgQuantileScalarOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32, interpolation string){ cq := *(*C.double)(unsafe.Pointer(&q)) cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) @@ -6570,27 +6761,7 @@ ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) cinterpolation := C.CString(interpolation) interpolationLen := len(interpolation) cinterpolationLen := *(*C.int)(unsafe.Pointer(&interpolationLen)) - C.atg_quantile_new_scalar_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) -} -func AtgQuantileOut(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_quantile_out(ptr, out, self, q, cdimVal, cdimNull, ckeepdim) -} -func AtgQuantileScalar(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ -cq := *(*C.double)(unsafe.Pointer(&q)) -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_quantile_scalar(ptr, self, cq, cdimVal, cdimNull, ckeepdim) -} -func AtgQuantileScalarOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ -cq := *(*C.double)(unsafe.Pointer(&q)) -cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) -cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - 
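// The 1.10 *_new quantile entry points replace the old ones outright:
// AtgQuantile/AtgNanquantile and their Scalar/Out forms now always take an
// interpolation string. Passing "linear", the torch.quantile default,
// reproduces the old behavior:
//
//	// 1.10: AtgQuantileScalar(ptr, self, 0.5, 0, 1, 0)
//	// 1.11:
//	AtgQuantileScalar(ptr, self, 0.5, 0 /*dimVal*/, 1 /*dimNull*/, 0 /*keepdim*/, "linear")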
C.atg_quantile_scalar_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim) + C.atg_quantile_scalar_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim, cinterpolation, cinterpolationLen) } func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32){ caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) @@ -6603,6 +6774,11 @@ czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) cdtype := *(*C.int)(unsafe.Pointer(&dtype)) C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype) } +func AtgQuantizePerTensorDynamic(ptr *Ctensor, self Ctensor, dtype int32, reduceRange int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +creduceRange := *(*C.int)(unsafe.Pointer(&reduceRange)) + C.atg_quantize_per_tensor_dynamic(ptr, self, cdtype, creduceRange) +} func AtgQuantizePerTensorTensorQparams(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, dtype int32){ cdtype := *(*C.int)(unsafe.Pointer(&dtype)) C.atg_quantize_per_tensor_tensor_qparams(ptr, self, scale, zeroPoint, cdtype) @@ -7073,6 +7249,18 @@ func AtgRound(ptr *Ctensor, self Ctensor){ func AtgRound_(ptr *Ctensor, self Ctensor){ C.atg_round_(ptr, self) } +func AtgRoundDecimals(ptr *Ctensor, self Ctensor, decimals int64){ +cdecimals := *(*C.int64_t)(unsafe.Pointer(&decimals)) + C.atg_round_decimals(ptr, self, cdecimals) +} +func AtgRoundDecimals_(ptr *Ctensor, self Ctensor, decimals int64){ +cdecimals := *(*C.int64_t)(unsafe.Pointer(&decimals)) + C.atg_round_decimals_(ptr, self, cdecimals) +} +func AtgRoundDecimalsOut(ptr *Ctensor, out Ctensor, self Ctensor, decimals int64){ +cdecimals := *(*C.int64_t)(unsafe.Pointer(&decimals)) + C.atg_round_decimals_out(ptr, out, self, cdecimals) +} func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor){ C.atg_round_out(ptr, out, self) } @@ -7209,20 +7397,29 @@ reduceLen := len(reduce) creduceLen := *(*C.int)(unsafe.Pointer(&reduceLen)) C.atg_scatter_value_reduce_out(ptr, out, self, cdim, index, value , creduce, creduceLen) } -func AtgSearchsorted(ptr *Ctensor, sortedSequence Ctensor, self Ctensor, outInt32 int32, right int32){ +func AtgSearchsorted(ptr *Ctensor, sortedSequence Ctensor, self Ctensor, outInt32 int32, right int32, side string, sorter Ctensor){ coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) -cright := *(*C.int)(unsafe.Pointer(&right)) - C.atg_searchsorted(ptr, sortedSequence, self, coutInt32, cright) +cright := *(*C.int)(unsafe.Pointer(&right)) +cside := C.CString(side) +sideLen := len(side) +csideLen := *(*C.int)(unsafe.Pointer(&sideLen)) + C.atg_searchsorted(ptr, sortedSequence, self, coutInt32, cright, cside, csideLen, sorter) } -func AtgSearchsortedScalar(ptr *Ctensor, sortedSequence Ctensor, selfScalar Cscalar, outInt32 int32, right int32){ +func AtgSearchsortedScalar(ptr *Ctensor, sortedSequence Ctensor, selfScalar Cscalar, outInt32 int32, right int32, side string, sorter Ctensor){ coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) -cright := *(*C.int)(unsafe.Pointer(&right)) - C.atg_searchsorted_scalar(ptr, sortedSequence, selfScalar , coutInt32, cright) +cright := *(*C.int)(unsafe.Pointer(&right)) +cside := C.CString(side) +sideLen := len(side) +csideLen := *(*C.int)(unsafe.Pointer(&sideLen)) + C.atg_searchsorted_scalar(ptr, sortedSequence, selfScalar , coutInt32, cright, cside, csideLen, sorter) } -func AtgSearchsortedTensorOut(ptr *Ctensor, out Ctensor, sortedSequence Ctensor, self Ctensor, outInt32 int32, right int32){ +func AtgSearchsortedTensorOut(ptr *Ctensor, out Ctensor, sortedSequence Ctensor, self Ctensor, 
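// searchsorted grows two arguments in 1.11: a side string ("left"/"right")
// and an optional sorter tensor of indices. A sketch with the defaults
// spelled out; passing a zero-valued Ctensor as "no sorter" is an assumption
// about how the hand-written layer above encodes absence:
//
//	var noSorter Ctensor
//	AtgSearchsorted(ptr, sortedSequence, self, 0 /*outInt32*/, 0 /*right*/, "left", noSorter)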
outInt32 int32, right int32, side string, sorter Ctensor){ coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) -cright := *(*C.int)(unsafe.Pointer(&right)) - C.atg_searchsorted_tensor_out(ptr, out, sortedSequence, self, coutInt32, cright) +cright := *(*C.int)(unsafe.Pointer(&right)) +cside := C.CString(side) +sideLen := len(side) +csideLen := *(*C.int)(unsafe.Pointer(&sideLen)) + C.atg_searchsorted_tensor_out(ptr, out, sortedSequence, self, coutInt32, cright, cside, csideLen, sorter) } func AtgSegmentReduce(ptr *Ctensor, data Ctensor, reduce string, lengths Ctensor, indices Ctensor, axis int64, unsafety int32, initial Cscalar){ creduce := C.CString(reduce) @@ -7244,6 +7441,11 @@ cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) cindex := *(*C.int64_t)(unsafe.Pointer(&index)) C.atg_select_backward(ptr, gradOutput, cinputSizesDataPtr, cinputSizesLen, cdim, cindex) } +func AtgSelectScatter(ptr *Ctensor, self Ctensor, src Ctensor, dim int64, index int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cindex := *(*C.int64_t)(unsafe.Pointer(&index)) + C.atg_select_scatter(ptr, self, src, cdim, cindex) +} func AtgSelu(ptr *Ctensor, self Ctensor){ C.atg_selu(ptr, self) } @@ -7359,6 +7561,15 @@ cend := *(*C.int64_t)(unsafe.Pointer(&end)) cstep := *(*C.int64_t)(unsafe.Pointer(&step)) C.atg_slice_backward(ptr, gradOutput, cinputSizesDataPtr, cinputSizesLen, cdim, cstart, cend, cstep) } +func AtgSliceScatter(ptr *Ctensor, self Ctensor, src Ctensor, dim int64, startVal int64, startNull int, endVal int64, endNull int, step int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstartVal := *(*C.int64_t)(unsafe.Pointer(&startVal)) +cstartNull := *(*C.uint8_t)(unsafe.Pointer(&startNull)) +cendVal := *(*C.int64_t)(unsafe.Pointer(&endVal)) +cendNull := *(*C.uint8_t)(unsafe.Pointer(&endNull)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) + C.atg_slice_scatter(ptr, self, src, cdim, cstartVal, cstartNull, cendVal, cendNull, cstep) +} func AtgSlogdet(ptr *Ctensor, self Ctensor){ C.atg_slogdet(ptr, self) } @@ -7501,11 +7712,11 @@ cdtype := *(*C.int)(unsafe.Pointer(&dtype)) func AtgSoftplus(ptr *Ctensor, self Ctensor){ C.atg_softplus(ptr, self) } -func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ - C.atg_softplus_backward(ptr, gradOutput, self, beta , threshold , output) +func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar){ + C.atg_softplus_backward(ptr, gradOutput, self, beta , threshold ) } -func AtgSoftplusBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ - C.atg_softplus_backward_grad_input(ptr, gradInput, gradOutput, self, beta , threshold , output) +func AtgSoftplusBackwardGradInput(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar){ + C.atg_softplus_backward_grad_input(ptr, gradInput, gradOutput, self, beta , threshold ) } func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor){ C.atg_softplus_out(ptr, out, self) @@ -7602,6 +7813,12 @@ csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) } +func AtgSparseSampledAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ + C.atg_sparse_sampled_addmm(ptr, self, mat1, mat2) +} +func AtgSparseSampledAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 
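// NOTE (editorial sketch; not part of the generated bindings): Go strings
// cross the cgo boundary as a C pointer plus an explicit byte length, as in
// the cside/csideLen pair of AtgSearchsorted above. C.CString allocates a
// malloc'ed copy, so ordinary cgo code frees it after the call; take_str
// below is a hypothetical stand-in for an atg_* function.

package main

/*
#include <stdlib.h>
static int take_str(char* p, int len) { return len; }
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	side := "left"
	cside := C.CString(side) // malloc'ed copy of the Go string
	defer C.free(unsafe.Pointer(cside))
	sideLen := len(side)
	// same reinterpret trick the generated wrappers use for lengths
	csideLen := *(*C.int)(unsafe.Pointer(&sideLen))
	fmt.Println(C.take_str(cside, csideLen)) // prints 4
}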
Ctensor, mat2 Ctensor){ + C.atg_sparse_sampled_addmm_out(ptr, out, self, mat1, mat2) +} func AtgSpecialDigamma(ptr *Ctensor, self Ctensor){ C.atg_special_digamma(ptr, self) } @@ -7765,11 +7982,13 @@ func AtgSpecialPsi(ptr *Ctensor, self Ctensor){ func AtgSpecialPsiOut(ptr *Ctensor, out Ctensor, self Ctensor){ C.atg_special_psi_out(ptr, out, self) } -func AtgSpecialRound(ptr *Ctensor, self Ctensor){ - C.atg_special_round(ptr, self) +func AtgSpecialRound(ptr *Ctensor, self Ctensor, decimals int64){ +cdecimals := *(*C.int64_t)(unsafe.Pointer(&decimals)) + C.atg_special_round(ptr, self, cdecimals) } -func AtgSpecialRoundOut(ptr *Ctensor, out Ctensor, self Ctensor){ - C.atg_special_round_out(ptr, out, self) +func AtgSpecialRoundOut(ptr *Ctensor, out Ctensor, self Ctensor, decimals int64){ +cdecimals := *(*C.int64_t)(unsafe.Pointer(&decimals)) + C.atg_special_round_out(ptr, out, self, cdecimals) } func AtgSpecialSinc(ptr *Ctensor, self Ctensor){ C.atg_special_sinc(ptr, self) @@ -7777,6 +7996,11 @@ func AtgSpecialSinc(ptr *Ctensor, self Ctensor){ func AtgSpecialSincOut(ptr *Ctensor, out Ctensor, self Ctensor){ C.atg_special_sinc_out(ptr, out, self) } +func AtgSpecialSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_special_softmax(ptr, self, cdim, cdtype) +} func AtgSpecialXlog1py(ptr *Ctensor, self Ctensor, other Ctensor){ C.atg_special_xlog1py(ptr, self, other) } diff --git a/libtch/torch_api_generated.cpp.h b/libtch/torch_api_generated.cpp.h index 8eb8d9e..402fb91 100644 --- a/libtch/torch_api_generated.cpp.h +++ b/libtch/torch_api_generated.cpp.h @@ -233,9 +233,16 @@ void atg__amp_update_scale_(tensor *out__, tensor self, tensor growth_tracker, t ) } -void atg__baddbmm_mkl_(tensor *out__, tensor self, tensor batch1, tensor batch2) { +void atg__autocast_to_full_precision(tensor *out__, tensor self, int cuda_enabled, int cpu_enabled) { PROTECT( - auto outputs__ = torch::_baddbmm_mkl_(*self, *batch1, *batch2); + auto outputs__ = self->_autocast_to_full_precision((bool)cuda_enabled, (bool)cpu_enabled); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__autocast_to_reduced_precision(tensor *out__, tensor self, int cuda_enabled, int cpu_enabled, int cuda_dtype, int cpu_dtype) { + PROTECT( + auto outputs__ = self->_autocast_to_reduced_precision((bool)cuda_enabled, (bool)cpu_enabled, at::ScalarType(cuda_dtype), at::ScalarType(cpu_dtype)); out__[0] = new torch::Tensor(outputs__); ) } @@ -373,14 +380,6 @@ void atg__conv_depthwise2d(tensor *out__, tensor self, tensor weight, int64_t *k ) } -void atg__conv_depthwise2d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { - PROTECT( - auto outputs__ = torch::_conv_depthwise2d_backward_out(*grad_input, *grad_weight, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - void atg__conv_depthwise2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t 
*stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::_conv_depthwise2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); @@ -402,6 +401,20 @@ void atg__convert_indices_from_coo_to_csr_out(tensor *out__, tensor out, tensor ) } +void atg__convert_indices_from_csr_to_coo(tensor *out__, tensor crow_indices, tensor col_indices, int out_int32, int transpose) { + PROTECT( + auto outputs__ = torch::_convert_indices_from_csr_to_coo(*crow_indices, *col_indices, (bool)out_int32, (bool)transpose); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__convert_indices_from_csr_to_coo_out(tensor *out__, tensor out, tensor crow_indices, tensor col_indices, int out_int32, int transpose) { + PROTECT( + auto outputs__ = torch::_convert_indices_from_csr_to_coo_out(*out, *crow_indices, *col_indices, (bool)out_int32, (bool)transpose); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32) { PROTECT( auto outputs__ = torch::_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled, (bool)allow_tf32); @@ -423,13 +436,6 @@ void atg__convolution_mode(tensor *out__, tensor input, tensor weight, tensor bi ) } -void atg__convolution_nogroup(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len) { - PROTECT( - auto outputs__ = torch::_convolution_nogroup(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len)); - out__[0] = new torch::Tensor(outputs__); - ) -} - void atg__copy_from(tensor *out__, tensor self, tensor dst, int non_blocking) { PROTECT( auto outputs__ = torch::_copy_from(*self, *dst, (bool)non_blocking); @@ -557,6 +563,13 @@ void atg__dirichlet_grad(tensor *out__, tensor x, tensor alpha, tensor total) { ) } +void atg__efficientzerotensor(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::_efficientzerotensor(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) { PROTECT( auto outputs__ = torch::_embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), (bool)include_last_offset, padding_idx); @@ -760,6 +773,20 @@ int atg__has_compatible_shallow_copy_type(tensor self, tensor from) { return 0; } +int atg__has_same_storage_numel(tensor self, tensor other) { + PROTECT( + return torch::_has_same_storage_numel(*self, *other); + ) + return 0; +} + +void atg__histogramdd_from_bin_tensors(tensor *out__, tensor self, tensor *bins_data, int bins_len, tensor weight, int density) { + PROTECT( + auto outputs__ = torch::_histogramdd_from_bin_tensors(*self, of_carray_tensor(bins_data, bins_len), (weight ? 
*weight : torch::Tensor()), (bool)density); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) { PROTECT( auto outputs__ = torch::_index_copy_(*self, dim, *index, *source); @@ -781,11 +808,11 @@ void atg__indices(tensor *out__, tensor self) { ) } -void atg__inverse_helper(tensor *out__, tensor self) { +int atg__is_zerotensor(tensor self) { PROTECT( - auto outputs__ = torch::_inverse_helper(*self); - out__[0] = new torch::Tensor(outputs__); + return torch::_is_zerotensor(*self); ) + return 0; } void atg__linalg_inv_out_helper_(tensor *out__, tensor self, tensor infos_lu, tensor infos_getri) { @@ -803,6 +830,24 @@ void atg__linalg_qr_helper(tensor *out__, tensor self, char* mode_ptr, int mode_ ) } +void atg__linalg_svd(tensor *out__, tensor A, int full_matrices, int compute_uv) { + PROTECT( + auto outputs__ = torch::_linalg_svd(*A, (bool)full_matrices, (bool)compute_uv); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + ) +} + +void atg__linalg_svd_u(tensor *out__, tensor U, tensor S, tensor Vh, tensor A, int full_matrices, int compute_uv) { + PROTECT( + auto outputs__ = torch::_linalg_svd_out(*U, *S, *Vh, *A, (bool)full_matrices, (bool)compute_uv); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + ) +} + void atg__log_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) { PROTECT( auto outputs__ = torch::_log_softmax(*self, dim, (bool)half_to_float); @@ -810,16 +855,16 @@ void atg__log_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float ) } -void atg__log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { +void atg__log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, int input_dtype) { PROTECT( - auto outputs__ = torch::_log_softmax_backward_data(*grad_output, *output, dim, *self); + auto outputs__ = torch::_log_softmax_backward_data(*grad_output, *output, dim, at::ScalarType(input_dtype)); out__[0] = new torch::Tensor(outputs__); ) } -void atg__log_softmax_backward_data_out(tensor *out__, tensor out, tensor grad_output, tensor output, int64_t dim, tensor self) { +void atg__log_softmax_backward_data_out(tensor *out__, tensor out, tensor grad_output, tensor output, int64_t dim, int input_dtype) { PROTECT( - auto outputs__ = torch::_log_softmax_backward_data_out(*out, *grad_output, *output, dim, *self); + auto outputs__ = torch::_log_softmax_backward_data_out(*out, *grad_output, *output, dim, at::ScalarType(input_dtype)); out__[0] = new torch::Tensor(outputs__); ) } @@ -882,6 +927,13 @@ void atg__masked_scale(tensor *out__, tensor self, tensor mask, double scale) { ) } +void atg__masked_softmax(tensor *out__, tensor self, tensor mask) { + PROTECT( + auto outputs__ = torch::_masked_softmax(*self, *mask); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__mkldnn_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) { PROTECT( auto outputs__ = torch::_mkldnn_reshape(*self, torch::IntArrayRef(shape_data, shape_len)); @@ -903,6 +955,13 @@ void atg__mkldnn_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t di ) } +void atg__native_multi_head_self_attention(tensor *out__, tensor query, tensor qkv_weight, tensor qkv_bias, 
tensor proj_weight, tensor proj_bias, tensor mask) { + PROTECT( + auto outputs__ = torch::_native_multi_head_self_attention(*query, *qkv_weight, *qkv_bias, *proj_weight, *proj_bias, (mask ? *mask : torch::Tensor())); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__neg_view(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::_neg_view(*self); @@ -910,6 +969,13 @@ void atg__neg_view(tensor *out__, tensor self) { ) } +void atg__new_zeros_with_same_feature_meta(tensor *out__, tensor self, tensor other, int64_t self_num_batch_dims) { + PROTECT( + auto outputs__ = torch::_new_zeros_with_same_feature_meta(*self, *other, self_num_batch_dims); + out__[0] = new torch::Tensor(outputs__); + ) +} + int atg__nnpack_available() { PROTECT( return torch::_nnpack_available(); @@ -924,20 +990,6 @@ void atg__nnpack_spatial_convolution(tensor *out__, tensor input, tensor weight, ) } -void atg__nnpack_spatial_convolution_backward_input(tensor *out__, tensor input, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len) { - PROTECT( - auto outputs__ = torch::_nnpack_spatial_convolution_backward_input(*input, *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len)); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg__nnpack_spatial_convolution_backward_weight(tensor *out__, tensor input, int64_t *weightsize_data, int weightsize_len, tensor grad_output, int64_t *padding_data, int padding_len) { - PROTECT( - auto outputs__ = torch::_nnpack_spatial_convolution_backward_weight(*input, torch::IntArrayRef(weightsize_data, weightsize_len), *grad_output, torch::IntArrayRef(padding_data, padding_len)); - out__[0] = new torch::Tensor(outputs__); - ) -} - int64_t atg__nnz(tensor self) { PROTECT( return self->_nnz(); @@ -1046,6 +1098,15 @@ void atg__shape_as_tensor(tensor *out__, tensor self) { ) } +void atg__slow_conv2d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { + PROTECT( + auto outputs__ = torch::_slow_conv2d_backward_out(*grad_input, *grad_weight, *grad_bias, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + ) +} + void atg__sobol_engine_draw(tensor *out__, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype) { PROTECT( auto outputs__ = torch::_sobol_engine_draw(*quasi, n, *sobolstate, dimension, num_generated, at::ScalarType(dtype)); @@ -1082,16 +1143,16 @@ void atg__softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) { ) } -void atg__softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { +void atg__softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, int input_dtype) { PROTECT( - auto outputs__ = torch::_softmax_backward_data(*grad_output, *output, dim, *self); + auto outputs__ = torch::_softmax_backward_data(*grad_output, *output, dim, at::ScalarType(input_dtype)); out__[0] = new torch::Tensor(outputs__); ) } -void atg__softmax_backward_data_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output, 
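// NOTE (editorial sketch; not part of the generated bindings): multi-result
// wrappers such as atg__slow_conv2d_backward below return through the
// caller-supplied out__ array, writing std::get<0>..std::get<2> into
// consecutive slots. A pure-Go model of reading results back from such an
// out-parameter block (fillTriple is hypothetical):

package main

import (
	"fmt"
	"unsafe"
)

// fillTriple stands in for a three-output atg_* call: it writes its results
// into the three slots starting at out, just as the C wrapper fills
// out__[0], out__[1], out__[2].
func fillTriple(out *uintptr) {
	slots := unsafe.Slice(out, 3)
	slots[0], slots[1], slots[2] = 0xA, 0xB, 0xC
}

func main() {
	buf := make([]uintptr, 3) // room for e.g. gradInput, gradWeight, gradBias handles
	fillTriple(&buf[0])
	fmt.Println(buf) // [10 11 12]
}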
int64_t dim, tensor self) { +void atg__softmax_backward_data_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output, int64_t dim, int input_dtype) { PROTECT( - auto outputs__ = torch::_softmax_backward_data_out(*grad_input, *grad_output, *output, dim, *self); + auto outputs__ = torch::_softmax_backward_data_out(*grad_input, *grad_output, *output, dim, at::ScalarType(input_dtype)); out__[0] = new torch::Tensor(outputs__); ) } @@ -1118,6 +1179,13 @@ void atg__sparse_addmm(tensor *out__, tensor self, tensor sparse, tensor dense) ) } +void atg__sparse_broadcast_to(tensor *out__, tensor self, int64_t *size_data, int size_len) { + PROTECT( + auto outputs__ = torch::_sparse_broadcast_to(*self, torch::IntArrayRef(size_data, size_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__sparse_coo_tensor_unsafe(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::_sparse_coo_tensor_unsafe(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); @@ -1272,15 +1340,6 @@ void atg__standard_gamma_grad(tensor *out__, tensor self, tensor output) { ) } -void atg__svd_helper(tensor *out__, tensor self, int some, int compute_uv) { - PROTECT( - auto outputs__ = torch::_svd_helper(*self, (bool)some, (bool)compute_uv); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); - ) -} - void atg__symeig_helper(tensor *out__, tensor self, int eigenvectors, int upper) { PROTECT( auto outputs__ = torch::_symeig_helper(*self, (bool)eigenvectors, (bool)upper); @@ -1331,6 +1390,13 @@ void atg__test_string_default(tensor *out__, tensor dummy, char* a_ptr, int a_le ) } +void atg__test_warn_in_autograd(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::_test_warn_in_autograd(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__to_copy(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking) { PROTECT( auto outputs__ = torch::_to_copy(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking); @@ -1351,6 +1417,13 @@ tensor *atg__to_cpu(tensor *tensors_data, int tensors_len) { return nullptr; } +void atg__torch_cuda_cu_linker_symbol_op(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::_torch_cuda_cu_linker_symbol_op(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__trilinear(tensor *out__, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim) { PROTECT( auto outputs__ = torch::_trilinear(*i1, *i2, *i3, torch::IntArrayRef(expand1_data, expand1_len), torch::IntArrayRef(expand2_data, expand2_len), torch::IntArrayRef(expand3_data, expand3_len), torch::IntArrayRef(sumdim_data, sumdim_len), unroll_dim); @@ -1390,6 +1463,146 @@ void atg__unsafe_view(tensor *out__, tensor self, int64_t *size_data, int size_l ) } +void atg__upsample_bicubic2d_aa(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { + PROTECT( + auto outputs__ = torch::_upsample_bicubic2d_aa(*self, 
torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bicubic2d_aa_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bicubic2d_aa_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bicubic2d_aa_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bicubic2d_aa_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bicubic2d_aa_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bicubic2d_aa_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bilinear2d_aa(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bilinear2d_aa(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bilinear2d_aa_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bilinear2d_aa_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bilinear2d_aa_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bilinear2d_aa_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_bilinear2d_aa_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_bilinear2d_aa_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact1d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_null ? c10::nullopt : c10::optional<double>(scales_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_null ? c10::nullopt : c10::optional<double>(scales_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_null ? c10::nullopt : c10::optional<double>(scales_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_null ? c10::nullopt : c10::optional<double>(scales_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact2d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact3d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_d_null ? c10::nullopt : c10::optional<double>(scales_d_v), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d_null ? c10::nullopt : c10::optional<double>(scales_d_v), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d_null ? c10::nullopt : c10::optional<double>(scales_d_v), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg__upsample_nearest_exact3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) {
+ PROTECT(
+ auto outputs__ = torch::_upsample_nearest_exact3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_d_null ? c10::nullopt : c10::optional<double>(scales_d_v), scales_h_null ? c10::nullopt : c10::optional<double>(scales_h_v), scales_w_null ? c10::nullopt : c10::optional<double>(scales_w_v));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 int atg__use_cudnn_ctc_loss(tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank) {
 PROTECT(
 return torch::_use_cudnn_ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank);
@@ -1804,6 +2017,13 @@ void atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor ve
 )
 }

+void atg_adjoint(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::adjoint(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, int size_len, int align_corners) {
 PROTECT(
 auto outputs__ = torch::affine_grid_generator(*theta, torch::IntArrayRef(size_data, size_len), (bool)align_corners);
@@ -2106,6 +2326,27 @@ void atg_arctan(tensor *out__, tensor self) {
 )
 }

+void atg_arctan2(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::arctan2(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_arctan2_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->arctan2_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_arctan2_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::arctan2_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_arctan_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::arctan_(*self);
@@ -2176,6 +2417,13 @@ void atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) {
 )
 }

+void atg_argwhere(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::argwhere(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t storage_offset_null) {
 PROTECT(
 auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset_null ? c10::nullopt : c10::optional<int64_t>(storage_offset_v));
@@ -3453,15 +3701,6 @@ void atg_conv_depthwise3d(tensor *out__, tensor self, tensor weight, int64_t *ke
 )
 }

-void atg_conv_depthwise3d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
- PROTECT(
- auto outputs__ = torch::conv_depthwise3d_backward_out(*grad_input, *grad_weight, *grad_bias, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
- out__[0] = new torch::Tensor(std::get<0>(outputs__));
- out__[1] = new torch::Tensor(std::get<1>(outputs__));
- out__[2] = new torch::Tensor(std::get<2>(outputs__));
- )
-}
-
 void atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t pad) {
 PROTECT(
 auto outputs__ = torch::conv_tbc(*self, *weight, *bias, pad);
@@ -3735,34 +3974,6 @@ void atg_cudnn_convolution_add_relu(tensor *out__, tensor self, tensor weight, t
 )
 }

-void atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) {
- PROTECT(
- auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) {
- PROTECT(
- auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_cudnn_convolution_deprecated(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::cudnn_convolution(*self, *weight, (bias ?
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_cudnn_convolution_deprecated2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { - PROTECT( - auto outputs__ = torch::cudnn_convolution(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); - out__[0] = new torch::Tensor(outputs__); - ) -} - void atg_cudnn_convolution_relu(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::cudnn_convolution_relu(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); @@ -3777,34 +3988,6 @@ void atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, ) } -void atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { - PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { - PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_cudnn_convolution_transpose_deprecated(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { - PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_cudnn_convolution_transpose_deprecated2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { - PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); - out__[0] = new torch::Tensor(outputs__); - ) -} - void atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { PROTECT( auto outputs__ = torch::cudnn_grid_sampler(*self, *grid); @@ -4054,6 +4237,13 @@ void atg_diagonal_backward(tensor *out__, tensor grad_output, int64_t *input_siz ) } +void atg_diagonal_scatter(tensor *out__, tensor self, tensor src, int64_t offset, int64_t dim1, int64_t dim2) { + PROTECT( + auto outputs__ = torch::diagonal_scatter(*self, *src, offset, dim1, dim2); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_diff(tensor *out__, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append) { PROTECT( auto outputs__ = torch::diff(*self, n, dim, (prepend ? *prepend : torch::Tensor()), (append ? *append : torch::Tensor())); @@ -4874,6 +5064,20 @@ void atg_fft_hfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64 ) } +void atg_fft_hfft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_hfft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_hfft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_hfft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_fft_hfft_out(tensor *out__, tensor out, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { PROTECT( auto outputs__ = torch::fft_hfft_out(*out, *self, n_null ? 
c10::nullopt : c10::optional<int64_t>(n_v), dim, std::string(norm_ptr, norm_len));
@@ -4881,6 +5085,20 @@ void atg_fft_hfft_out(tensor *out__, tensor out, tensor self, int64_t n_v, uint8
 )
 }

+void atg_fft_hfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_hfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_fft_hfftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_hfftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_fft_ifft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) {
 PROTECT(
 auto outputs__ = torch::fft_ifft(*self, n_null ? c10::nullopt : c10::optional<int64_t>(n_v), dim, std::string(norm_ptr, norm_len));
@@ -4937,6 +5155,20 @@ void atg_fft_ihfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int6
 )
 }

+void atg_fft_ihfft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_ihfft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_fft_ihfft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_ihfft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_fft_ihfft_out(tensor *out__, tensor out, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) {
 PROTECT(
 auto outputs__ = torch::fft_ihfft_out(*out, *self, n_null ? c10::nullopt : c10::optional<int64_t>(n_v), dim, std::string(norm_ptr, norm_len));
@@ -4944,6 +5176,20 @@ void atg_fft_ihfft_out(tensor *out__, tensor out, tensor self, int64_t n_v, uint
 )
 }

+void atg_fft_ihfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_ihfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_fft_ihfftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_ihfftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_fft_irfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) {
 PROTECT(
 auto outputs__ = torch::fft_irfft(*self, n_null ? c10::nullopt : c10::optional<int64_t>(n_v), dim, std::string(norm_ptr, norm_len));
@@ -5736,14 +5982,6 @@ void atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t inter
 )
 }

-void atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
- PROTECT(
- auto outputs__ = torch::grid_sampler_2d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
- out__[0] = new torch::Tensor(std::get<0>(outputs__));
- out__[1] = new torch::Tensor(std::get<1>(outputs__));
- )
-}
-
 void atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
 PROTECT(
 auto outputs__ = torch::grid_sampler_3d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
@@ -6263,16 +6501,9 @@ void atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tenso
 )
 }

-void atg_index_add_alpha(tensor *out__, tensor self, int64_t dim, tensor index, tensor source, scalar alpha) {
+void atg_index_add_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor source) {
 PROTECT(
- auto outputs__ = torch::index_add(*self, dim, *index, *source, *alpha);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_index_add_alpha_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source, scalar alpha) {
- PROTECT(
- auto outputs__ = self->index_add_(dim, *index, *source, *alpha);
+ auto outputs__ = torch::index_add_out(*out, *self, dim, *index, *source);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -7044,6 +7275,20 @@ void atg_linalg_cond_p_str_out(tensor *out__, tensor out, tensor self, char* p_p
 )
 }

+void atg_linalg_cross(tensor *out__, tensor self, tensor other, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cross(*self, *other, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cross_out(*out, *self, *other, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_linalg_det(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::linalg_det(*self);
@@ -7058,6 +7303,13 @@ void atg_linalg_det_out(tensor *out__, tensor out, tensor self) {
 )
 }

+void atg_linalg_diagonal(tensor *out__, tensor A, int64_t offset, int64_t dim1, int64_t dim2) {
+ PROTECT(
+ auto outputs__ = torch::linalg_diagonal(*A, offset, dim1, dim2);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_linalg_eig(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::linalg_eig(*self);
@@ -7182,6 +7434,40 @@ void atg_linalg_lstsq_out(tensor *out__, tensor solution, tensor residuals, tens
 )
 }

+void atg_linalg_lu_factor(tensor *out__, tensor A, int pivot) {
+ PROTECT(
+ auto outputs__ = torch::linalg_lu_factor(*A, (bool)pivot);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ )
+}
+
+void atg_linalg_lu_factor_ex(tensor *out__, tensor A, int pivot, int check_errors) {
+ PROTECT(
+ auto outputs__ = torch::linalg_lu_factor_ex(*A, (bool)pivot, (bool)check_errors);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ )
+}
+
+void atg_linalg_lu_factor_ex_out(tensor *out__, tensor LU, tensor pivots, tensor info, tensor A, int pivot, int check_errors) {
+ PROTECT(
+ auto outputs__ = torch::linalg_lu_factor_ex_out(*LU, *pivots, *info, *A, (bool)pivot, (bool)check_errors);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ )
+}
+
+void atg_linalg_lu_factor_out(tensor *out__, tensor LU, tensor pivots, tensor A, int pivot) {
+ PROTECT(
+ auto outputs__ = torch::linalg_lu_factor_out(*LU, *pivots, *A, (bool)pivot);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ )
+}
+
 void atg_linalg_matmul(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::linalg_matmul(*self, *other);
@@ -7196,6 +7482,13 @@ void atg_linalg_matmul_out(tensor *out__, tensor out, tensor self, tensor other)
 )
 }

+void atg_linalg_matrix_exp(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_exp(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_linalg_matrix_power(tensor *out__, tensor self, int64_t n) {
 PROTECT(
 auto outputs__ = torch::linalg_matrix_power(*self, n);
@@ -7210,16 +7503,44 @@ void atg_linalg_matrix_power_out(tensor *out__, tensor out, tensor self, int64_t
 )
 }

-void atg_linalg_matrix_rank(tensor *out__, tensor self, double tol_v, uint8_t tol_null, int hermitian) {
+void atg_linalg_matrix_rank(tensor *out__, tensor self, double tol, int hermitian) {
 PROTECT(
- auto outputs__ = torch::linalg_matrix_rank(*self, tol_null ? c10::nullopt : c10::optional<double>(tol_v), (bool)hermitian);
+ auto outputs__ = torch::linalg_matrix_rank(*self, tol, (bool)hermitian);
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_linalg_matrix_rank_out(tensor *out__, tensor out, tensor self, double tol_v, uint8_t tol_null, int hermitian) {
+void atg_linalg_matrix_rank_atol_rtol_float(tensor *out__, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian) {
 PROTECT(
- auto outputs__ = torch::linalg_matrix_rank_out(*out, *self, tol_null ? c10::nullopt : c10::optional<double>(tol_v), (bool)hermitian);
+ auto outputs__ = torch::linalg_matrix_rank(*self, atol_null ? c10::nullopt : c10::optional<double>(atol_v), rtol_null ? c10::nullopt : c10::optional<double>(rtol_v), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_matrix_rank_atol_rtol_float_out(tensor *out__, tensor out, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank_out(*out, *self, atol_null ? c10::nullopt : c10::optional<double>(atol_v), rtol_null ? c10::nullopt : c10::optional<double>(rtol_v), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_matrix_rank_atol_rtol_tensor(tensor *out__, tensor input, tensor atol, tensor rtol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank(*input, (atol ? *atol : torch::Tensor()), (rtol ? *rtol : torch::Tensor()), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_matrix_rank_atol_rtol_tensor_out(tensor *out__, tensor out, tensor input, tensor atol, tensor rtol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank_out(*out, *input, (atol ? *atol : torch::Tensor()), (rtol ? *rtol : torch::Tensor()), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_matrix_rank_out(tensor *out__, tensor out, tensor self, double tol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank_out(*out, *self, tol, (bool)hermitian);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -7287,6 +7608,34 @@ void atg_linalg_pinv(tensor *out__, tensor self, double rcond, int hermitian) {
 )
 }

+void atg_linalg_pinv_atol_rtol_float(tensor *out__, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv(*self, atol_null ? c10::nullopt : c10::optional<double>(atol_v), rtol_null ? c10::nullopt : c10::optional<double>(rtol_v), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_pinv_atol_rtol_float_out(tensor *out__, tensor out, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv_out(*out, *self, atol_null ? c10::nullopt : c10::optional<double>(atol_v), rtol_null ? c10::nullopt : c10::optional<double>(rtol_v), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_pinv_atol_rtol_tensor(tensor *out__, tensor self, tensor atol, tensor rtol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv(*self, (atol ? *atol : torch::Tensor()), (rtol ? *rtol : torch::Tensor()), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_pinv_atol_rtol_tensor_out(tensor *out__, tensor out, tensor self, tensor atol, tensor rtol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv_out(*out, *self, (atol ? *atol : torch::Tensor()), (rtol ? *rtol : torch::Tensor()), (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_linalg_pinv_out(tensor *out__, tensor out, tensor self, double rcond, int hermitian) {
 PROTECT(
 auto outputs__ = torch::linalg_pinv_out(*out, *self, rcond, (bool)hermitian);
@@ -7354,34 +7703,48 @@ void atg_linalg_solve_out(tensor *out__, tensor out, tensor input, tensor other)
 )
 }

-void atg_linalg_svd(tensor *out__, tensor self, int full_matrices) {
+void atg_linalg_solve_triangular(tensor *out__, tensor self, tensor B, int upper, int left, int unitriangular) {
 PROTECT(
- auto outputs__ = torch::linalg_svd(*self, (bool)full_matrices);
- out__[0] = new torch::Tensor(std::get<0>(outputs__));
- out__[1] = new torch::Tensor(std::get<1>(outputs__));
- out__[2] = new torch::Tensor(std::get<2>(outputs__));
- )
-}
-
-void atg_linalg_svd_u(tensor *out__, tensor U, tensor S, tensor Vh, tensor self, int full_matrices) {
- PROTECT(
- auto outputs__ = torch::linalg_svd_out(*U, *S, *Vh, *self, (bool)full_matrices);
- out__[0] = new torch::Tensor(std::get<0>(outputs__));
- out__[1] = new torch::Tensor(std::get<1>(outputs__));
- out__[2] = new torch::Tensor(std::get<2>(outputs__));
- )
-}
-
-void atg_linalg_svdvals(tensor *out__, tensor input) {
- PROTECT(
- auto outputs__ = torch::linalg_svdvals(*input);
+ auto outputs__ = torch::linalg_solve_triangular(*self, *B, (bool)upper, (bool)left, (bool)unitriangular);
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_linalg_svdvals_out(tensor *out__, tensor out, tensor input) {
+void atg_linalg_solve_triangular_out(tensor *out__, tensor out, tensor self, tensor B, int upper, int left, int unitriangular) {
 PROTECT(
- auto outputs__ = torch::linalg_svdvals_out(*out, *input);
+ auto outputs__ = torch::linalg_solve_triangular_out(*out, *self, *B, (bool)upper, (bool)left, (bool)unitriangular);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_svd(tensor *out__, tensor A, int full_matrices) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svd(*A, (bool)full_matrices);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ )
+}
+
+void atg_linalg_svd_u(tensor *out__, tensor U, tensor S, tensor Vh, tensor A, int full_matrices) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svd_out(*U, *S, *Vh, *A, (bool)full_matrices);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ )
+}
+
+void atg_linalg_svdvals(tensor *out__, tensor A) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svdvals(*A);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_linalg_svdvals_out(tensor *out__, tensor out, tensor A) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svdvals_out(*out, *A);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -7428,16 +7791,16 @@ void atg_linear_out(tensor *out__, tensor out, tensor input, tensor weight, tens
 )
 }

-void atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, int options_kind, int options_device) {
+void atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) {
 PROTECT(
- auto outputs__ = torch::linspace(*start, *end, steps_null ? c10::nullopt : c10::optional<int64_t>(steps_v), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ auto outputs__ = torch::linspace(*start, *end, steps, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null) {
+void atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) {
 PROTECT(
- auto outputs__ = torch::linspace_out(*out, *start, *end, steps_null ? c10::nullopt : c10::optional<int64_t>(steps_v));
+ auto outputs__ = torch::linspace_out(*out, *start, *end, steps);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -7736,16 +8099,16 @@ void atg_logit_out(tensor *out__, tensor out, tensor self, double eps_v, uint8_t
 )
 }

-void atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base, int options_kind, int options_device) {
+void atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) {
 PROTECT(
- auto outputs__ = torch::logspace(*start, *end, steps_null ? c10::nullopt : c10::optional<int64_t>(steps_v), base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ auto outputs__ = torch::logspace(*start, *end, steps, base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base) {
+void atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) {
 PROTECT(
- auto outputs__ = torch::logspace_out(*out, *start, *end, steps_null ? c10::nullopt : c10::optional<int64_t>(steps_v), base);
+ auto outputs__ = torch::logspace_out(*out, *start, *end, steps, base);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -7978,6 +8341,13 @@ void atg_matrix_exp_backward(tensor *out__, tensor self, tensor grad) {
 )
 }

+void atg_matrix_h(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->matrix_H();
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_matrix_power(tensor *out__, tensor self, int64_t n) {
 PROTECT(
 auto outputs__ = torch::matrix_power(*self, n);
@@ -8272,6 +8642,13 @@ tensor *atg_meshgrid_indexing(tensor *tensors_data, int tensors_len, char* index
 return nullptr;
 }

+void atg_mh(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->mH();
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_min(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::min(*self);
@@ -8348,27 +8725,6 @@ void atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bi
 )
 }

-void atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) {
- PROTECT(
- auto outputs__ = torch::miopen_convolution_backward_bias(*grad_output);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::miopen_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::miopen_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
 void atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
 PROTECT(
 auto outputs__ = torch::miopen_convolution_transpose(*self, *weight, (bias ?
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
@@ -8376,20 +8732,6 @@ void atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight,
 )
 }

-void atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::miopen_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::miopen_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
 void atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
 PROTECT(
 auto outputs__ = torch::miopen_depthwise_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
@@ -8397,20 +8739,6 @@ void atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight,
 )
 }

-void atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::miopen_depthwise_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
-void atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
- PROTECT(
- auto outputs__ = torch::miopen_depthwise_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
 void atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) {
 PROTECT(
 auto outputs__ = torch::miopen_rnn(*input, of_carray_tensor(weight_data, weight_len), weight_stride0, *hx, (cx ? *cx : torch::Tensor()), mode, hidden_size, num_layers, (bool)batch_first, dropout, (bool)train, (bool)bidirectional, torch::IntArrayRef(batch_sizes_data, batch_sizes_len), (dropout_state ?
*dropout_state : torch::Tensor())); @@ -8471,21 +8799,6 @@ void atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bi ) } -void atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { - PROTECT( - auto outputs__ = torch::mkldnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { - PROTECT( - auto outputs__ = torch::mkldnn_convolution_backward_weights(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - void atg_mkldnn_linear(tensor *out__, tensor self, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::mkldnn_linear(*self, *weight, (bias ? *bias : torch::Tensor())); @@ -8650,6 +8963,13 @@ void atg_msort_out(tensor *out__, tensor out, tensor self) { ) } +void atg_mt(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->mT(); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_mul(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::mul(*self, *other); @@ -8869,62 +9189,34 @@ void atg_nanmedian_dim_values(tensor *out__, tensor values, tensor indices, tens ) } -void atg_nanquantile(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::nanquantile(*self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_nanquantile_new(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_nanquantile(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::nanquantile(*self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_nanquantile_new_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_nanquantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim_null ? 
c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_nanquantile_new_scalar(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_nanquantile_scalar(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::nanquantile(*self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_nanquantile_new_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_nanquantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::nanquantile_out(*out, *self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_nanquantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_nanquantile_scalar(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::nanquantile(*self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_nanquantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::nanquantile_out(*out, *self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - void atg_nansum(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::nansum(*self, at::ScalarType(dtype)); @@ -8992,6 +9284,28 @@ void atg_native_batch_norm_out(tensor *out__, tensor out, tensor save_mean, tens ) } +void atg_native_channel_shuffle(tensor *out__, tensor self, int64_t groups) { + PROTECT( + auto outputs__ = torch::native_channel_shuffle(*self, groups); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_native_dropout(tensor *out__, tensor input, double p, int train) { + PROTECT( + auto outputs__ = torch::native_dropout(*input, p, (bool)train); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + ) +} + +void atg_native_dropout_backward(tensor *out__, tensor grad_output, tensor mask, double scale) { + PROTECT( + auto outputs__ = torch::native_dropout_backward(*grad_output, *mask, scale); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_native_group_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps) { PROTECT( auto outputs__ = torch::native_group_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? 
*bias : torch::Tensor()), n, C, HxW, group, eps); @@ -9761,62 +10075,34 @@ void atg_qr_q(tensor *out__, tensor Q, tensor R, tensor self, int some) { ) } -void atg_quantile(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::quantile(*self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_quantile_new(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_quantile(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::quantile(*self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_quantile_new_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_quantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::quantile_out(*out, *self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_quantile_new_scalar(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_quantile_scalar(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::quantile(*self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_quantile_new_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { +void atg_quantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len) { PROTECT( auto outputs__ = torch::quantile_out(*out, *self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim, std::string(interpolation_ptr, interpolation_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_quantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::quantile_out(*out, *self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_quantile_scalar(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::quantile(*self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); - out__[0] = new torch::Tensor(outputs__); - ) -} - -void atg_quantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { - PROTECT( - auto outputs__ = torch::quantile_out(*out, *self, q, dim_null ? 
c10::nullopt : c10::optional(dim_v), (bool)keepdim);
- out__[0] = new torch::Tensor(outputs__);
- )
-}
-
 void atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) {
 PROTECT(
 auto outputs__ = torch::quantize_per_channel(*self, *scales, *zero_points, axis, at::ScalarType(dtype));
@@ -9831,6 +10117,13 @@ void atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t z
 )
 }

+void atg_quantize_per_tensor_dynamic(tensor *out__, tensor self, int dtype, int reduce_range) {
+ PROTECT(
+ auto outputs__ = torch::quantize_per_tensor_dynamic(*self, at::ScalarType(dtype), (bool)reduce_range);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_quantize_per_tensor_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, int dtype) {
 PROTECT(
 auto outputs__ = torch::quantize_per_tensor(*self, *scale, *zero_point, at::ScalarType(dtype));
@@ -10528,6 +10821,27 @@ void atg_round_(tensor *out__, tensor self) {
 )
 }

+void atg_round_decimals(tensor *out__, tensor self, int64_t decimals) {
+ PROTECT(
+ auto outputs__ = torch::round(*self, decimals);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_round_decimals_(tensor *out__, tensor self, int64_t decimals) {
+ PROTECT(
+ auto outputs__ = torch::round_(*self, decimals);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_round_decimals_out(tensor *out__, tensor out, tensor self, int64_t decimals) {
+ PROTECT(
+ auto outputs__ = torch::round_out(*out, *self, decimals);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_round_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::round_out(*out, *self);
@@ -10738,23 +11052,23 @@ void atg_scatter_value_reduce_out(tensor *out__, tensor out, tensor self, int64_
 )
 }

-void atg_searchsorted(tensor *out__, tensor sorted_sequence, tensor self, int out_int32, int right) {
+void atg_searchsorted(tensor *out__, tensor sorted_sequence, tensor self, int out_int32, int right, char* side_ptr, int side_len, tensor sorter) {
 PROTECT(
- auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right);
+ auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right, std::string(side_ptr, side_len), (sorter ? *sorter : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_searchsorted_scalar(tensor *out__, tensor sorted_sequence, scalar self_scalar, int out_int32, int right) {
+void atg_searchsorted_scalar(tensor *out__, tensor sorted_sequence, scalar self_scalar, int out_int32, int right, char* side_ptr, int side_len, tensor sorter) {
 PROTECT(
- auto outputs__ = torch::searchsorted(*sorted_sequence, *self_scalar, (bool)out_int32, (bool)right);
+ auto outputs__ = torch::searchsorted(*sorted_sequence, *self_scalar, (bool)out_int32, (bool)right, std::string(side_ptr, side_len), (sorter ? *sorter : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_searchsorted_tensor_out(tensor *out__, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right) {
+void atg_searchsorted_tensor_out(tensor *out__, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right, char* side_ptr, int side_len, tensor sorter) {
 PROTECT(
- auto outputs__ = torch::searchsorted_out(*out, *sorted_sequence, *self, (bool)out_int32, (bool)right);
+ auto outputs__ = torch::searchsorted_out(*out, *sorted_sequence, *self, (bool)out_int32, (bool)right, std::string(side_ptr, side_len), (sorter ? *sorter : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -10780,6 +11094,13 @@ void atg_select_backward(tensor *out__, tensor grad_output, int64_t *input_sizes
 )
 }

+void atg_select_scatter(tensor *out__, tensor self, tensor src, int64_t dim, int64_t index) {
+ PROTECT(
+ auto outputs__ = torch::select_scatter(*self, *src, dim, index);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_selu(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::selu(*self);
@@ -11018,6 +11339,13 @@ void atg_slice_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_
 )
 }

+void atg_slice_scatter(tensor *out__, tensor self, tensor src, int64_t dim, int64_t start_v, uint8_t start_null, int64_t end_v, uint8_t end_null, int64_t step) {
+ PROTECT(
+ auto outputs__ = torch::slice_scatter(*self, *src, dim, start_null ? c10::nullopt : c10::optional(start_v), end_null ? c10::nullopt : c10::optional(end_v), step);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_slogdet(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::slogdet(*self);
@@ -11159,16 +11487,16 @@ void atg_softplus(tensor *out__, tensor self) {
 )
 }

-void atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
+void atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold) {
 PROTECT(
- auto outputs__ = torch::softplus_backward(*grad_output, *self, *beta, *threshold, *output);
+ auto outputs__ = torch::softplus_backward(*grad_output, *self, *beta, *threshold);
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_softplus_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
+void atg_softplus_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold) {
 PROTECT(
- auto outputs__ = torch::softplus_backward_out(*grad_input, *grad_output, *self, *beta, *threshold, *output);
+ auto outputs__ = torch::softplus_backward_out(*grad_input, *grad_output, *self, *beta, *threshold);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -11319,6 +11647,20 @@ void atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data
 )
 }

+void atg_sparse_sampled_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
+ PROTECT(
+ auto outputs__ = torch::sparse_sampled_addmm(*self, *mat1, *mat2);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
+void atg_sparse_sampled_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) {
+ PROTECT(
+ auto outputs__ = torch::sparse_sampled_addmm_out(*out, *self, *mat1, *mat2);
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_special_digamma(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::special_digamma(*self);
@@ -11662,16 +12004,16 @@ void atg_special_psi_out(tensor *out__, tensor out, tensor self) {
 )
 }

-void atg_special_round(tensor *out__, tensor self) {
+void atg_special_round(tensor *out__, tensor self, int64_t decimals) {
 PROTECT(
- auto outputs__ = torch::special_round(*self);
+ auto outputs__ = torch::special_round(*self, decimals);
 out__[0] = new torch::Tensor(outputs__);
 )
 }

-void atg_special_round_out(tensor *out__, tensor out, tensor self) {
+void atg_special_round_out(tensor *out__, tensor out, tensor self, int64_t decimals) {
 PROTECT(
- auto outputs__ = torch::special_round_out(*out, *self);
+ auto outputs__ = torch::special_round_out(*out, *self, decimals);
 out__[0] = new torch::Tensor(outputs__);
 )
 }
@@ -11690,6 +12032,13 @@ void atg_special_sinc_out(tensor *out__, tensor out, tensor self) {
 )
 }

+void atg_special_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
+ PROTECT(
+ auto outputs__ = torch::special_softmax(*self, dim, at::ScalarType(dtype));
+ out__[0] = new torch::Tensor(outputs__);
+ )
+}
+
 void atg_special_xlog1py(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::special_xlog1py(*self, *other);
diff --git a/libtch/torch_api_generated.h b/libtch/torch_api_generated.h
index 97dd10c..8caca34 100644
--- a/libtch/torch_api_generated.h
+++ b/libtch/torch_api_generated.h
@@ -33,7 +33,8 @@ void atg__add_relu_scalar_(tensor *, tensor self, scalar other);
 void atg__aminmax(tensor *, tensor self);
 void atg__aminmax_dim(tensor *, tensor self, int64_t dim, int keepdim);
 void atg__amp_update_scale_(tensor *, tensor self, tensor growth_tracker, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
-void atg__baddbmm_mkl_(tensor *, tensor self, tensor batch1, tensor batch2);
+void atg__autocast_to_full_precision(tensor *, tensor self, int cuda_enabled, int cpu_enabled);
+void atg__autocast_to_reduced_precision(tensor *, tensor self, int cuda_enabled, int cpu_enabled, int cuda_dtype, int cpu_dtype);
 void atg__cast_byte(tensor *, tensor self, int non_blocking);
 void atg__cast_char(tensor *, tensor self, int non_blocking);
 void atg__cast_double(tensor *, tensor self, int non_blocking);
@@ -53,14 +54,14 @@ void atg__compute_linear_combination_out(tensor *, tensor out, tensor input, ten
 void atg__conj(tensor *, tensor self);
 void atg__conj_physical(tensor *, tensor self);
 void atg__conv_depthwise2d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
-void atg__conv_depthwise2d_backward(tensor *, tensor grad_input, tensor grad_weight, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
 void atg__conv_depthwise2d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
 void atg__convert_indices_from_coo_to_csr(tensor *, tensor self, int64_t size, int out_int32);
 void atg__convert_indices_from_coo_to_csr_out(tensor *, tensor out, tensor self, int64_t size, int out_int32);
+void atg__convert_indices_from_csr_to_coo(tensor *, tensor crow_indices, tensor col_indices, int out_int32, int transpose);
+void 
atg__convert_indices_from_csr_to_coo_out(tensor *, tensor out, tensor crow_indices, tensor col_indices, int out_int32, int transpose); void atg__convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32); void atg__convolution_deprecated(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled); void atg__convolution_mode(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char* padding_ptr, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -void atg__convolution_nogroup(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len); void atg__copy_from(tensor *, tensor self, tensor dst, int non_blocking); void atg__copy_from_and_resize(tensor *, tensor self, tensor dst); void atg__ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int zero_infinity); @@ -78,6 +79,7 @@ void atg__dim_arange(tensor *, tensor like, int64_t dim); int64_t atg__dimi(tensor self); int64_t atg__dimv(tensor self); void atg__dirichlet_grad(tensor *, tensor x, tensor alpha, tensor total); +void atg__efficientzerotensor(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); void atg__embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx); void atg__embedding_bag_backward(tensor *, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int64_t padding_idx); void atg__embedding_bag_dense_backward(tensor *, tensor grad, tensor indices, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx); @@ -105,15 +107,19 @@ void atg__gather_sparse_backward(tensor *, tensor self, int64_t dim, tensor inde void atg__grid_sampler_2d_cpu_fallback(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg__grid_sampler_2d_cpu_fallback_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); int atg__has_compatible_shallow_copy_type(tensor self, tensor from); +int atg__has_same_storage_numel(tensor self, tensor other); +void atg__histogramdd_from_bin_tensors(tensor *, tensor self, tensor *bins_data, int bins_len, tensor weight, int density); void atg__index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source); void atg__index_put_impl_(tensor *, tensor self, tensor *indices_data, int 
indices_len, tensor values, int accumulate, int unsafe); void atg__indices(tensor *, tensor self); -void atg__inverse_helper(tensor *, tensor self); +int atg__is_zerotensor(tensor self); void atg__linalg_inv_out_helper_(tensor *, tensor self, tensor infos_lu, tensor infos_getri); void atg__linalg_qr_helper(tensor *, tensor self, char* mode_ptr, int mode_len); +void atg__linalg_svd(tensor *, tensor A, int full_matrices, int compute_uv); +void atg__linalg_svd_u(tensor *, tensor U, tensor S, tensor Vh, tensor A, int full_matrices, int compute_uv); void atg__log_softmax(tensor *, tensor self, int64_t dim, int half_to_float); -void atg__log_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); -void atg__log_softmax_backward_data_out(tensor *, tensor out, tensor grad_output, tensor output, int64_t dim, tensor self); +void atg__log_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, int input_dtype); +void atg__log_softmax_backward_data_out(tensor *, tensor out, tensor grad_output, tensor output, int64_t dim, int input_dtype); void atg__log_softmax_out(tensor *, tensor out, tensor self, int64_t dim, int half_to_float); void atg__logcumsumexp(tensor *, tensor self, int64_t dim); void atg__logcumsumexp_out(tensor *, tensor out, tensor self, int64_t dim); @@ -122,14 +128,15 @@ void atg__make_dual(tensor *, tensor primal, tensor tangent, int64_t level); void atg__make_per_channel_quantized_tensor(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis); void atg__make_per_tensor_quantized_tensor(tensor *, tensor self, double scale, int64_t zero_point); void atg__masked_scale(tensor *, tensor self, tensor mask, double scale); +void atg__masked_softmax(tensor *, tensor self, tensor mask); void atg__mkldnn_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len); void atg__mkldnn_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1); void atg__mkldnn_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1); +void atg__native_multi_head_self_attention(tensor *, tensor query, tensor qkv_weight, tensor qkv_bias, tensor proj_weight, tensor proj_bias, tensor mask); void atg__neg_view(tensor *, tensor self); +void atg__new_zeros_with_same_feature_meta(tensor *, tensor self, tensor other, int64_t self_num_batch_dims); int atg__nnpack_available(); void atg__nnpack_spatial_convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -void atg__nnpack_spatial_convolution_backward_input(tensor *, tensor input, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len); -void atg__nnpack_spatial_convolution_backward_weight(tensor *, tensor input, int64_t *weightsize_data, int weightsize_len, tensor grad_output, int64_t *padding_data, int padding_len); int64_t atg__nnz(tensor self); void atg__pack_padded_sequence(tensor *, tensor input, tensor lengths, int batch_first); void atg__pack_padded_sequence_backward(tensor *, tensor grad, int64_t *input_size_data, int input_size_len, tensor batch_sizes, int batch_first); @@ -145,16 +152,18 @@ void atg__sample_dirichlet(tensor *, tensor self); void atg__saturate_weight_to_fp16(tensor *, tensor weight); void atg__segment_reduce_backward(tensor *, tensor grad, tensor output, tensor data, char* reduce_ptr, int reduce_len, tensor lengths, int64_t axis); void atg__shape_as_tensor(tensor *, tensor self); +void atg__slow_conv2d_backward(tensor *, tensor grad_input, tensor 
grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); void atg__sobol_engine_draw(tensor *, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype); void atg__sobol_engine_ff_(tensor *, tensor self, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated); void atg__sobol_engine_initialize_state_(tensor *, tensor self, int64_t dimension); void atg__sobol_engine_scramble_(tensor *, tensor self, tensor ltm, int64_t dimension); void atg__softmax(tensor *, tensor self, int64_t dim, int half_to_float); -void atg__softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); -void atg__softmax_backward_data_out(tensor *, tensor grad_input, tensor grad_output, tensor output, int64_t dim, tensor self); +void atg__softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, int input_dtype); +void atg__softmax_backward_data_out(tensor *, tensor grad_input, tensor grad_output, tensor output, int64_t dim, int input_dtype); void atg__softmax_out(tensor *, tensor out, tensor self, int64_t dim, int half_to_float); void atg__solve_helper(tensor *, tensor self, tensor A); void atg__sparse_addmm(tensor *, tensor self, tensor sparse, tensor dense); +void atg__sparse_broadcast_to(tensor *, tensor self, int64_t *size_data, int size_len); void atg__sparse_coo_tensor_unsafe(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device); void atg__sparse_coo_tensor_with_dims(tensor *, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, int options_kind, int options_device); void atg__sparse_coo_tensor_with_dims_and_tensors(tensor *, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, tensor indices, tensor values, int options_kind, int options_device); @@ -177,7 +186,6 @@ void atg__stack(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); void atg__stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim); void atg__standard_gamma(tensor *, tensor self); void atg__standard_gamma_grad(tensor *, tensor self, tensor output); -void atg__svd_helper(tensor *, tensor self, int some, int compute_uv); void atg__symeig_helper(tensor *, tensor self, int eigenvectors, int upper); void atg__test_ambiguous_defaults(tensor *, tensor dummy, int64_t a, int64_t b); void atg__test_ambiguous_defaults_b(tensor *, tensor dummy, int64_t a, char* b_ptr, int b_len); @@ -185,13 +193,35 @@ void atg__test_optional_filled_intlist(tensor *, tensor values, int64_t *addends void atg__test_optional_intlist(tensor *, tensor values, int64_t *addends_data, int addends_len); void atg__test_serialization_subcmul(tensor *, tensor self, tensor other); void atg__test_string_default(tensor *, tensor dummy, char* a_ptr, int a_len, char* b_ptr, int b_len); +void atg__test_warn_in_autograd(tensor *, tensor self); void atg__to_copy(tensor *, tensor self, int options_kind, int options_device, int non_blocking); tensor *atg__to_cpu(tensor *tensors_data, int tensors_len); +void atg__torch_cuda_cu_linker_symbol_op(tensor *, tensor self); void atg__trilinear(tensor *, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim); void 
atg__unique(tensor *, tensor self, int sorted, int return_inverse); void atg__unique2(tensor *, tensor self, int sorted, int return_inverse, int return_counts); void atg__unpack_dual(tensor *, tensor dual, int64_t level); void atg__unsafe_view(tensor *, tensor self, int64_t *size_data, int size_len); +void atg__upsample_bicubic2d_aa(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bicubic2d_aa_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bicubic2d_aa_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bicubic2d_aa_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bilinear2d_aa(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bilinear2d_aa_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bilinear2d_aa_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_bilinear2d_aa_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null); +void atg__upsample_nearest_exact1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null); +void atg__upsample_nearest_exact1d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null); +void atg__upsample_nearest_exact1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null); +void atg__upsample_nearest_exact2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void 
atg__upsample_nearest_exact2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg__upsample_nearest_exact3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); int atg__use_cudnn_ctc_loss(tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank); int atg__use_cudnn_rnn_flatten_weight(); void atg__values(tensor *, tensor self); @@ -250,6 +280,7 @@ void atg_addmv_out(tensor *, tensor out, tensor self, tensor mat, tensor vec); void atg_addr(tensor *, tensor self, tensor vec1, tensor vec2); void atg_addr_(tensor *, tensor self, tensor vec1, tensor vec2); void atg_addr_out(tensor *, tensor out, tensor self, tensor vec1, tensor vec2); +void atg_adjoint(tensor *, tensor self); void atg_affine_grid_generator(tensor *, tensor theta, int64_t *size_data, int size_len, int align_corners); void atg_affine_grid_generator_backward(tensor *, tensor grad, int64_t *size_data, int size_len, int align_corners); void atg_alias(tensor *, tensor self); @@ -292,6 +323,9 @@ void atg_arcsinh(tensor *, tensor self); void atg_arcsinh_(tensor *, tensor self); void atg_arcsinh_out(tensor *, tensor out, tensor self); void atg_arctan(tensor *, tensor self); +void atg_arctan2(tensor *, tensor self, tensor other); +void atg_arctan2_(tensor *, tensor self, tensor other); +void atg_arctan2_out(tensor *, tensor out, tensor self, tensor other); void atg_arctan_(tensor *, tensor self); void atg_arctan_out(tensor *, tensor out, tensor self); void atg_arctanh(tensor *, tensor self); @@ -302,6 +336,7 @@ void atg_argmax_out(tensor *, tensor out, tensor self, int64_t dim_v, uint8_t di void atg_argmin(tensor *, tensor self, int64_t dim_v, uint8_t dim_null, int keepdim); void atg_argmin_out(tensor *, tensor out, tensor self, int64_t dim_v, uint8_t dim_null, int keepdim); void atg_argsort(tensor *, tensor self, int64_t dim, int descending); +void atg_argwhere(tensor *, tensor self); void atg_as_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t 
storage_offset_null); void atg_as_strided_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t storage_offset_null); void atg_asin(tensor *, tensor self); @@ -479,7 +514,6 @@ void atg_conv2d_padding(tensor *, tensor input, tensor weight, tensor bias, int6 void atg_conv3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); void atg_conv3d_padding(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char* padding_ptr, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); void atg_conv_depthwise3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len); -void atg_conv_depthwise3d_backward(tensor *, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len); void atg_conv_tbc(tensor *, tensor self, tensor weight, tensor bias, int64_t pad); void atg_conv_tbc_backward(tensor *, tensor self, tensor input, tensor weight, tensor bias, int64_t pad); void atg_conv_transpose1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); @@ -518,16 +552,8 @@ void atg_cudnn_batch_norm(tensor *, tensor input, tensor weight, tensor bias, te void atg_cudnn_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace); void atg_cudnn_convolution(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); void atg_cudnn_convolution_add_relu(tensor *, tensor self, tensor weight, tensor z, scalar alpha, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -void atg_cudnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); -void atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); -void atg_cudnn_convolution_deprecated(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_cudnn_convolution_deprecated2(tensor *, tensor self, tensor 
weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_cudnn_convolution_relu(tensor *, tensor self, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); void atg_cudnn_convolution_transpose(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); -void atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); -void atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); -void atg_cudnn_convolution_transpose_deprecated(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_cudnn_convolution_transpose_deprecated2(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_cudnn_grid_sampler(tensor *, tensor self, tensor grid); void atg_cudnn_grid_sampler_backward(tensor *, tensor self, tensor grid, tensor grad_output); int atg_cudnn_is_acceptable(tensor self); @@ -562,6 +588,7 @@ void atg_diag_out(tensor *, tensor out, tensor self, int64_t diagonal); void atg_diagflat(tensor *, tensor self, int64_t offset); void atg_diagonal(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); void atg_diagonal_backward(tensor *, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t offset, int64_t dim1, int64_t dim2); +void atg_diagonal_scatter(tensor *, tensor self, tensor src, int64_t offset, int64_t dim1, int64_t dim2); void atg_diff(tensor *, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append); void atg_diff_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append); void atg_digamma(tensor *, tensor self); @@ -676,7 +703,11 @@ void atg_fft_fftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *di void atg_fft_fftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_fftshift(tensor *, tensor self, int64_t *dim_data, int dim_len); void atg_fft_hfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_hfft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_hfft2_out(tensor *, tensor out, tensor self, int64_t 
*s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_hfft_out(tensor *, tensor out, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_hfftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_hfftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_ifft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); void atg_fft_ifft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_ifft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); @@ -685,7 +716,11 @@ void atg_fft_ifftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *d void atg_fft_ifftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_ifftshift(tensor *, tensor self, int64_t *dim_data, int dim_len); void atg_fft_ihfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_ihfft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_ihfft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_ihfft_out(tensor *, tensor out, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_ihfftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_ihfftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_irfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); void atg_fft_irfft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fft_irfft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); @@ -798,7 +833,6 @@ void atg_greater_tensor_(tensor *, tensor self, tensor other); void atg_greater_tensor_out(tensor *, tensor out, tensor self, tensor other); void atg_grid_sampler(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg_grid_sampler_2d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); -void atg_grid_sampler_2d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg_grid_sampler_3d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg_grid_sampler_3d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg_group_norm(tensor *, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled); @@ -871,8 +905,7 @@ void atg_imag(tensor *, tensor self); void atg_index(tensor *, tensor self, tensor 
*indices_data, int indices_len); void atg_index_add(tensor *, tensor self, int64_t dim, tensor index, tensor source); void atg_index_add_(tensor *, tensor self, int64_t dim, tensor index, tensor source); -void atg_index_add_alpha(tensor *, tensor self, int64_t dim, tensor index, tensor source, scalar alpha); -void atg_index_add_alpha_(tensor *, tensor self, int64_t dim, tensor index, tensor source, scalar alpha); +void atg_index_add_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, tensor source); void atg_index_copy(tensor *, tensor self, int64_t dim, tensor index, tensor source); void atg_index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source); void atg_index_fill(tensor *, tensor self, int64_t dim, tensor index, scalar value); @@ -982,8 +1015,11 @@ void atg_linalg_cond(tensor *, tensor self, scalar p); void atg_linalg_cond_out(tensor *, tensor out, tensor self, scalar p); void atg_linalg_cond_p_str(tensor *, tensor self, char* p_ptr, int p_len); void atg_linalg_cond_p_str_out(tensor *, tensor out, tensor self, char* p_ptr, int p_len); +void atg_linalg_cross(tensor *, tensor self, tensor other, int64_t dim); +void atg_linalg_cross_out(tensor *, tensor out, tensor self, tensor other, int64_t dim); void atg_linalg_det(tensor *, tensor self); void atg_linalg_det_out(tensor *, tensor out, tensor self); +void atg_linalg_diagonal(tensor *, tensor A, int64_t offset, int64_t dim1, int64_t dim2); void atg_linalg_eig(tensor *, tensor self); void atg_linalg_eig_out(tensor *, tensor eigenvalues, tensor eigenvectors, tensor self); void atg_linalg_eigh(tensor *, tensor self, char* UPLO_ptr, int UPLO_len); @@ -1000,12 +1036,21 @@ void atg_linalg_inv_ex_inverse(tensor *, tensor inverse, tensor info, tensor sel void atg_linalg_inv_out(tensor *, tensor out, tensor self); void atg_linalg_lstsq(tensor *, tensor self, tensor b, double rcond_v, uint8_t rcond_null, char* driver_ptr, int driver_len); void atg_linalg_lstsq_out(tensor *, tensor solution, tensor residuals, tensor rank, tensor singular_values, tensor self, tensor b, double rcond_v, uint8_t rcond_null, char* driver_ptr, int driver_len); +void atg_linalg_lu_factor(tensor *, tensor A, int pivot); +void atg_linalg_lu_factor_ex(tensor *, tensor A, int pivot, int check_errors); +void atg_linalg_lu_factor_ex_out(tensor *, tensor LU, tensor pivots, tensor info, tensor A, int pivot, int check_errors); +void atg_linalg_lu_factor_out(tensor *, tensor LU, tensor pivots, tensor A, int pivot); void atg_linalg_matmul(tensor *, tensor self, tensor other); void atg_linalg_matmul_out(tensor *, tensor out, tensor self, tensor other); +void atg_linalg_matrix_exp(tensor *, tensor self); void atg_linalg_matrix_power(tensor *, tensor self, int64_t n); void atg_linalg_matrix_power_out(tensor *, tensor out, tensor self, int64_t n); -void atg_linalg_matrix_rank(tensor *, tensor self, double tol_v, uint8_t tol_null, int hermitian); -void atg_linalg_matrix_rank_out(tensor *, tensor out, tensor self, double tol_v, uint8_t tol_null, int hermitian); +void atg_linalg_matrix_rank(tensor *, tensor self, double tol, int hermitian); +void atg_linalg_matrix_rank_atol_rtol_float(tensor *, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian); +void atg_linalg_matrix_rank_atol_rtol_float_out(tensor *, tensor out, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian); +void atg_linalg_matrix_rank_atol_rtol_tensor(tensor *, tensor input, tensor atol, tensor rtol, int 
hermitian); +void atg_linalg_matrix_rank_atol_rtol_tensor_out(tensor *, tensor out, tensor input, tensor atol, tensor rtol, int hermitian); +void atg_linalg_matrix_rank_out(tensor *, tensor out, tensor self, double tol, int hermitian); void atg_linalg_matrix_rank_out_tol_tensor(tensor *, tensor out, tensor input, tensor tol, int hermitian); void atg_linalg_matrix_rank_tol_tensor(tensor *, tensor input, tensor tol, int hermitian); void atg_linalg_multi_dot(tensor *, tensor *tensors_data, int tensors_len); @@ -1015,6 +1060,10 @@ void atg_linalg_norm_ord_str(tensor *, tensor self, char* ord_ptr, int ord_len, void atg_linalg_norm_ord_str_out(tensor *, tensor out, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_linalg_norm_out(tensor *, tensor out, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_linalg_pinv(tensor *, tensor self, double rcond, int hermitian); +void atg_linalg_pinv_atol_rtol_float(tensor *, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian); +void atg_linalg_pinv_atol_rtol_float_out(tensor *, tensor out, tensor self, double atol_v, uint8_t atol_null, double rtol_v, uint8_t rtol_null, int hermitian); +void atg_linalg_pinv_atol_rtol_tensor(tensor *, tensor self, tensor atol, tensor rtol, int hermitian); +void atg_linalg_pinv_atol_rtol_tensor_out(tensor *, tensor out, tensor self, tensor atol, tensor rtol, int hermitian); void atg_linalg_pinv_out(tensor *, tensor out, tensor self, double rcond, int hermitian); void atg_linalg_pinv_out_rcond_tensor(tensor *, tensor out, tensor self, tensor rcond, int hermitian); void atg_linalg_pinv_rcond_tensor(tensor *, tensor self, tensor rcond, int hermitian); @@ -1024,18 +1073,20 @@ void atg_linalg_slogdet(tensor *, tensor self); void atg_linalg_slogdet_out(tensor *, tensor sign, tensor logabsdet, tensor self); void atg_linalg_solve(tensor *, tensor input, tensor other); void atg_linalg_solve_out(tensor *, tensor out, tensor input, tensor other); -void atg_linalg_svd(tensor *, tensor self, int full_matrices); -void atg_linalg_svd_u(tensor *, tensor U, tensor S, tensor Vh, tensor self, int full_matrices); -void atg_linalg_svdvals(tensor *, tensor input); -void atg_linalg_svdvals_out(tensor *, tensor out, tensor input); +void atg_linalg_solve_triangular(tensor *, tensor self, tensor B, int upper, int left, int unitriangular); +void atg_linalg_solve_triangular_out(tensor *, tensor out, tensor self, tensor B, int upper, int left, int unitriangular); +void atg_linalg_svd(tensor *, tensor A, int full_matrices); +void atg_linalg_svd_u(tensor *, tensor U, tensor S, tensor Vh, tensor A, int full_matrices); +void atg_linalg_svdvals(tensor *, tensor A); +void atg_linalg_svdvals_out(tensor *, tensor out, tensor A); void atg_linalg_tensorinv(tensor *, tensor self, int64_t ind); void atg_linalg_tensorinv_out(tensor *, tensor out, tensor self, int64_t ind); void atg_linalg_tensorsolve(tensor *, tensor self, tensor other, int64_t *dims_data, int dims_len); void atg_linalg_tensorsolve_out(tensor *, tensor out, tensor self, tensor other, int64_t *dims_data, int dims_len); void atg_linear(tensor *, tensor input, tensor weight, tensor bias); void atg_linear_out(tensor *, tensor out, tensor input, tensor weight, tensor bias); -void atg_linspace(tensor *, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, int options_kind, int options_device); -void atg_linspace_out(tensor *, tensor out, scalar start, scalar 
end, int64_t steps_v, uint8_t steps_null); +void atg_linspace(tensor *, scalar start, scalar end, int64_t steps, int options_kind, int options_device); +void atg_linspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps); void atg_log(tensor *, tensor self); void atg_log10(tensor *, tensor self); void atg_log10_(tensor *, tensor self); @@ -1078,8 +1129,8 @@ void atg_logit_(tensor *, tensor self, double eps_v, uint8_t eps_null); void atg_logit_backward(tensor *, tensor grad_output, tensor self, double eps_v, uint8_t eps_null); void atg_logit_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, double eps_v, uint8_t eps_null); void atg_logit_out(tensor *, tensor out, tensor self, double eps_v, uint8_t eps_null); -void atg_logspace(tensor *, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base, int options_kind, int options_device); -void atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base); +void atg_logspace(tensor *, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device); +void atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps, double base); void atg_logsumexp(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_logsumexp_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); @@ -1111,6 +1162,7 @@ void atg_matmul(tensor *, tensor self, tensor other); void atg_matmul_out(tensor *, tensor out, tensor self, tensor other); void atg_matrix_exp(tensor *, tensor self); void atg_matrix_exp_backward(tensor *, tensor self, tensor grad); +void atg_matrix_h(tensor *, tensor self); void atg_matrix_power(tensor *, tensor self, int64_t n); void atg_matrix_power_out(tensor *, tensor out, tensor self, int64_t n); void atg_matrix_rank(tensor *, tensor self, int symmetric); @@ -1150,6 +1202,7 @@ void atg_median_dim(tensor *, tensor self, int64_t dim, int keepdim); void atg_median_dim_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); tensor *atg_meshgrid(tensor *tensors_data, int tensors_len); tensor *atg_meshgrid_indexing(tensor *tensors_data, int tensors_len, char* indexing_ptr, int indexing_len); +void atg_mh(tensor *, tensor self); void atg_min(tensor *, tensor self); void atg_min_dim(tensor *, tensor self, int64_t dim, int keepdim); void atg_min_dim_min(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); @@ -1160,15 +1213,8 @@ void atg_minimum_out(tensor *, tensor out, tensor self, tensor other); void atg_miopen_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); void atg_miopen_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon); void atg_miopen_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_miopen_convolution_backward_bias(tensor *, tensor grad_output); -void 
atg_miopen_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_miopen_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_miopen_convolution_transpose(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_miopen_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_miopen_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_miopen_depthwise_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_miopen_depthwise_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_miopen_depthwise_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_miopen_rnn(tensor *, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state); void atg_mish(tensor *, tensor self); void atg_mish_(tensor *, tensor self); @@ -1177,8 +1223,6 @@ void atg_mish_out(tensor *, tensor out, tensor self); void atg_mkldnn_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); void atg_mkldnn_adaptive_avg_pool2d_backward(tensor *, tensor grad_output, tensor self); void atg_mkldnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); -void atg_mkldnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int 
bias_defined); -void atg_mkldnn_convolution_backward_weights(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); void atg_mkldnn_linear(tensor *, tensor self, tensor weight, tensor bias); void atg_mkldnn_linear_backward_input(tensor *, int64_t *input_size_data, int input_size_len, tensor grad_output, tensor weight); void atg_mkldnn_linear_backward_weights(tensor *, tensor grad_output, tensor input, tensor weight, int bias_defined); @@ -1202,6 +1246,7 @@ void atg_mse_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_o void atg_mse_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); void atg_msort(tensor *, tensor self); void atg_msort_out(tensor *, tensor out, tensor self); +void atg_mt(tensor *, tensor self); void atg_mul(tensor *, tensor self, tensor other); void atg_mul_(tensor *, tensor self, tensor other); void atg_mul_out(tensor *, tensor out, tensor self, tensor other); @@ -1233,14 +1278,10 @@ void atg_nanmean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int d void atg_nanmedian(tensor *, tensor self); void atg_nanmedian_dim(tensor *, tensor self, int64_t dim, int keepdim); void atg_nanmedian_dim_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); -void atg_nanquantile(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); -void atg_nanquantile_new(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_nanquantile_new_out(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_nanquantile_new_scalar(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_nanquantile_new_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_nanquantile_out(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); -void atg_nanquantile_scalar(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); -void atg_nanquantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_nanquantile(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); +void atg_nanquantile_out(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); +void atg_nanquantile_scalar(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); +void atg_nanquantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); void atg_nansum(tensor *, tensor self, int dtype); void atg_nansum_dim_intlist(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_nansum_intlist_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); @@ -1250,6 +1291,9 @@ void atg_narrow_copy_out(tensor 
*, tensor out, tensor self, int64_t dim, int64_t void atg_narrow_tensor(tensor *, tensor self, int64_t dim, tensor start, int64_t length); void atg_native_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); void atg_native_batch_norm_out(tensor *, tensor out, tensor save_mean, tensor save_invstd, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); +void atg_native_channel_shuffle(tensor *, tensor self, int64_t groups); +void atg_native_dropout(tensor *, tensor input, double p, int train); +void atg_native_dropout_backward(tensor *, tensor grad_output, tensor mask, double scale); void atg_native_group_norm(tensor *, tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps); void atg_native_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps); void atg_native_norm(tensor *, tensor self); @@ -1358,16 +1402,13 @@ double atg_q_scale(tensor self); int64_t atg_q_zero_point(tensor self); void atg_qr(tensor *, tensor self, int some); void atg_qr_q(tensor *, tensor Q, tensor R, tensor self, int some); -void atg_quantile(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); -void atg_quantile_new(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_quantile_new_out(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_quantile_new_scalar(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_quantile_new_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); -void atg_quantile_out(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); -void atg_quantile_scalar(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); -void atg_quantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_quantile(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); +void atg_quantile_out(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); +void atg_quantile_scalar(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); +void atg_quantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim, char* interpolation_ptr, int interpolation_len); void atg_quantize_per_channel(tensor *, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype); void atg_quantize_per_tensor(tensor *, tensor self, double scale, int64_t zero_point, int dtype); +void atg_quantize_per_tensor_dynamic(tensor *, tensor self, int dtype, int reduce_range); void atg_quantize_per_tensor_tensor_qparams(tensor *, tensor self, tensor scale, tensor zero_point, int dtype); tensor *atg_quantize_per_tensor_tensors(tensor *tensors_data, int tensors_len, tensor scales, tensor 
zero_points, int dtype);
 void atg_quantized_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point);
@@ -1466,6 +1507,9 @@ void atg_roll(tensor *, tensor self, int64_t *shifts_data, int shifts_len, int64
 void atg_rot90(tensor *, tensor self, int64_t k, int64_t *dims_data, int dims_len);
 void atg_round(tensor *, tensor self);
 void atg_round_(tensor *, tensor self);
+void atg_round_decimals(tensor *, tensor self, int64_t decimals);
+void atg_round_decimals_(tensor *, tensor self, int64_t decimals);
+void atg_round_decimals_out(tensor *, tensor out, tensor self, int64_t decimals);
 void atg_round_out(tensor *, tensor out, tensor self);
 void atg_row_stack(tensor *, tensor *tensors_data, int tensors_len);
 void atg_row_stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
@@ -1496,12 +1540,13 @@ void atg_scatter_value_out(tensor *, tensor out, tensor self, int64_t dim, tenso
 void atg_scatter_value_reduce(tensor *, tensor self, int64_t dim, tensor index, scalar value, char* reduce_ptr, int reduce_len);
 void atg_scatter_value_reduce_(tensor *, tensor self, int64_t dim, tensor index, scalar value, char* reduce_ptr, int reduce_len);
 void atg_scatter_value_reduce_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, scalar value, char* reduce_ptr, int reduce_len);
-void atg_searchsorted(tensor *, tensor sorted_sequence, tensor self, int out_int32, int right);
-void atg_searchsorted_scalar(tensor *, tensor sorted_sequence, scalar self_scalar, int out_int32, int right);
-void atg_searchsorted_tensor_out(tensor *, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right);
+void atg_searchsorted(tensor *, tensor sorted_sequence, tensor self, int out_int32, int right, char* side_ptr, int side_len, tensor sorter);
+void atg_searchsorted_scalar(tensor *, tensor sorted_sequence, scalar self_scalar, int out_int32, int right, char* side_ptr, int side_len, tensor sorter);
+void atg_searchsorted_tensor_out(tensor *, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right, char* side_ptr, int side_len, tensor sorter);
 void atg_segment_reduce(tensor *, tensor data, char* reduce_ptr, int reduce_len, tensor lengths, tensor indices, int64_t axis, int unsafe, scalar initial);
 void atg_select(tensor *, tensor self, int64_t dim, int64_t index);
 void atg_select_backward(tensor *, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t index);
+void atg_select_scatter(tensor *, tensor self, tensor src, int64_t dim, int64_t index);
 void atg_selu(tensor *, tensor self);
 void atg_selu_(tensor *, tensor self);
 void atg_set_(tensor *, tensor self);
@@ -1536,6 +1581,7 @@ void atg_sinh_(tensor *, tensor self);
 void atg_sinh_out(tensor *, tensor out, tensor self);
 void atg_slice(tensor *, tensor self, int64_t dim, int64_t start_v, uint8_t start_null, int64_t end_v, uint8_t end_null, int64_t step);
 void atg_slice_backward(tensor *, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t start, int64_t end, int64_t step);
+void atg_slice_scatter(tensor *, tensor self, tensor src, int64_t dim, int64_t start_v, uint8_t start_null, int64_t end_v, uint8_t end_null, int64_t step);
 void atg_slogdet(tensor *, tensor self);
 void atg_slow_conv3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
 void atg_slow_conv3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
@@ -1556,8 +1602,8 @@ void atg_soft_margin_loss_backward_grad_input(tensor *, tensor grad_input, tenso
 void atg_soft_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
 void atg_softmax(tensor *, tensor self, int64_t dim, int dtype);
 void atg_softplus(tensor *, tensor self);
-void atg_softplus_backward(tensor *, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
-void atg_softplus_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
+void atg_softplus_backward(tensor *, tensor grad_output, tensor self, scalar beta, scalar threshold);
+void atg_softplus_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold);
 void atg_softplus_out(tensor *, tensor out, tensor self);
 void atg_softshrink(tensor *, tensor self);
 void atg_softshrink_backward(tensor *, tensor grad_output, tensor self, scalar lambd);
@@ -1578,6 +1624,8 @@ int64_t atg_sparse_dim(tensor self);
 void atg_sparse_mask(tensor *, tensor self, tensor mask);
 void atg_sparse_resize_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
 void atg_sparse_resize_and_clear_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
+void atg_sparse_sampled_addmm(tensor *, tensor self, tensor mat1, tensor mat2);
+void atg_sparse_sampled_addmm_out(tensor *, tensor out, tensor self, tensor mat1, tensor mat2);
 void atg_special_digamma(tensor *, tensor self);
 void atg_special_digamma_out(tensor *, tensor out, tensor self);
 void atg_special_entr(tensor *, tensor self);
@@ -1627,10 +1675,11 @@ void atg_special_polygamma(tensor *, int64_t n, tensor self);
 void atg_special_polygamma_out(tensor *, tensor out, int64_t n, tensor self);
 void atg_special_psi(tensor *, tensor self);
 void atg_special_psi_out(tensor *, tensor out, tensor self);
-void atg_special_round(tensor *, tensor self);
-void atg_special_round_out(tensor *, tensor out, tensor self);
+void atg_special_round(tensor *, tensor self, int64_t decimals);
+void atg_special_round_out(tensor *, tensor out, tensor self, int64_t decimals);
 void atg_special_sinc(tensor *, tensor self);
 void atg_special_sinc_out(tensor *, tensor out, tensor self);
+void atg_special_softmax(tensor *, tensor self, int64_t dim, int dtype);
 void atg_special_xlog1py(tensor *, tensor self, tensor other);
 void atg_special_xlog1py_other_scalar(tensor *, tensor self, scalar other);
 void atg_special_xlog1py_other_scalar_out(tensor *, tensor out, tensor self, scalar other);
diff --git a/setup-gotch.sh b/setup-gotch.sh
index 901f64d..b7e5472 100644
--- a/setup-gotch.sh
+++ b/setup-gotch.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 GOTCH_VERSION="${GOTCH_VER:-v0.6.2}"
-CUDA_VERSION="${CUDA_VER:-11.1}"
+CUDA_VERSION="${CUDA_VER:-11.3}"
 
 if [ -z $GOPATH ]
 then
 $GOPATH="$HOME/go"
diff --git a/setup-libtorch.sh b/setup-libtorch.sh
index 1636930..f7ee8e0 100644
--- a/setup-libtorch.sh
+++ b/setup-libtorch.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
-LIBTORCH_VERSION="${LIBTORCH_VER:-1.10.0}"
-CUDA_VERSION="${CUDA_VER:-11.1}"
+LIBTORCH_VERSION="${LIBTORCH_VER:-1.11.0}"
+CUDA_VERSION="${CUDA_VER:-11.3}"
 
 if [ "${CUDA_VERSION}"=="cpu" ]; then
 CU_VERSION="cpu"
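Note: the two setup scripts above still read their versions from environment variables; only the defaults are bumped to libtorch 1.11.0 and CUDA 11.3. A different toolchain can therefore still be selected per run without editing the scripts, e.g. `CUDA_VER=cpu bash setup-libtorch.sh` for a CPU-only install.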
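Beyond adding and removing entry points, the regenerated header also reshapes several existing signatures: `atg_quantile`/`atg_nanquantile` fold the former `*_new*` variants into a single overload that always takes an interpolation string, `atg_searchsorted` gains `side`/`sorter` parameters, and `atg_linspace`/`atg_logspace` drop the nullable `steps_v`/`steps_null` pair for a plain `steps`. Existing Go call sites pick up an extra argument after regeneration. A minimal sketch, assuming the generator maps `atg_quantile` to a `MustQuantile` wrapper in its usual style (the signature below is hypothetical and should be checked against the regenerated ts/tensor-generated.go):

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustOfSlice([]float64{3, 1, 4, 1, 5})
	q := ts.MustOfSlice([]float64{0.5})

	// libtorch 1.10 exposed quantile both with and without an interpolation
	// mode (quantile vs. quantile.new); 1.11 keeps a single overload, so the
	// regenerated wrapper always takes the mode, and "linear" reproduces the
	// old default. Signature assumed, not verified against the generated file.
	median := x.MustQuantile(q, []int64{0}, false, "linear", false)
	fmt.Println(median.Float64Values()) // [3]
}
```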
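The last and largest hunk, which follows, regenerates ts/must-tensor-generated.go wholesale; its hunk header shows the file shrinking from ~18,120 to ~14,890 lines. Every wrapper in that file is mechanical: it calls the corresponding error-returning method from ts/tensor-generated.go and aborts via log.Fatal on error, so failures in the C layer cannot be silently ignored. A hand-written illustration of that pattern with stand-in types (none of this code is taken from the patch):

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// Tensor stands in for ts.Tensor; exampleOp stands in for any generated,
// error-returning binding. Both are hypothetical.
type Tensor struct{ name string }

func (t *Tensor) exampleOp(other *Tensor, del bool) (*Tensor, error) {
	if other == nil {
		return nil, errors.New("exampleOp: nil operand")
	}
	// In the real bindings, del == true asks the wrapper to free the
	// receiver's underlying C tensor after the call; nothing to free here.
	return &Tensor{name: t.name + "+" + other.name}, nil
}

// MustExampleOp mirrors the Must* wrappers regenerated below:
// same call, but any error is fatal instead of returned.
func (t *Tensor) MustExampleOp(other *Tensor, del bool) *Tensor {
	retVal, err := t.exampleOp(other, del)
	if err != nil {
		log.Fatal(err)
	}
	return retVal
}

func main() {
	a, b := &Tensor{name: "a"}, &Tensor{name: "b"}
	fmt.Println(a.MustExampleOp(b, false).name) // prints "a+b"
}
```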
diff --git a/ts/must-tensor-generated.go b/ts/must-tensor-generated.go
index d3ccdaf..9f3c970 100644
--- a/ts/must-tensor-generated.go
+++ b/ts/must-tensor-generated.go
@@ -2,18120 +2,14890 @@ package ts
 
 // NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
 
-import (
-	"log"
+import(
+	"log"
 
-	"github.com/sugarme/gotch"
+	"github.com/sugarme/gotch"
 )
 
-func (ts *Tensor) Must__And_(other *Scalar) {
-	err := ts.__And_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__AndTensor_(other *Tensor) {
-
-	err := ts.__AndTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Iand_(other *Scalar) {
-
-	err := ts.__Iand_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__IandTensor_(other *Tensor) {
-
-	err := ts.__IandTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Ilshift_(other *Scalar) {
-
-	err := ts.__Ilshift_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__IlshiftTensor_(other *Tensor) {
-
-	err := ts.__IlshiftTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Ior_(other *Scalar) {
-
-	err := ts.__Ior_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__IorTensor_(other *Tensor) {
-
-	err := ts.__IorTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Irshift_(other *Scalar) {
-
-	err := ts.__Irshift_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__IrshiftTensor_(other *Tensor) {
-
-	err := ts.__IrshiftTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Ixor_(other *Scalar) {
-
-	err := ts.__Ixor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__IxorTensor_(other *Tensor) {
-
-	err := ts.__IxorTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Lshift_(other *Scalar) {
-
-	err := ts.__Lshift_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__LshiftTensor_(other *Tensor) {
-
-	err := ts.__LshiftTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Or_(other *Scalar) {
-
-	err := ts.__Or_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__OrTensor_(other *Tensor) {
-
-	err := ts.__OrTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Rshift_(other *Scalar) {
-
-	err := ts.__Rshift_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__RshiftTensor_(other *Tensor) {
-
-	err := ts.__RshiftTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__Xor_(other *Scalar) {
-
-	err := ts.__Xor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must__XorTensor_(other *Tensor) {
-
-	err := ts.__XorTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts._AdaptiveAvgPool2d(outputSize, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-
return retVal -} - -func (ts *Tensor) Must_AdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._AdaptiveAvgPool3d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._AdaptiveAvgPool3dBackward(gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AddBatchDim(batchDim int64, level int64, del bool) (retVal *Tensor) { - - retVal, err := ts._AddBatchDim(batchDim, level, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AddRelu(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._AddRelu(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AddRelu_(other *Tensor) { - - err := ts._AddRelu_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_AddReluOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._AddReluOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AddReluScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts._AddReluScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AddReluScalar_(other *Scalar) { - - err := ts._AddReluScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_Aminmax(del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._Aminmax(del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_AminmaxDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._AminmaxDim(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) { - - err := ts._AmpUpdateScale_(growthTracker, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor) { - - err := ts._BaddbmmMkl_(batch1, batch2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_CastByte(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastByte(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastChar(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastChar(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastDouble(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastDouble(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastFloat(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastFloat(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastHalf(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastHalf(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastInt(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastInt(nonBlocking, 
del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastLong(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastLong(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastShort(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastShort(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_Cat(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _Cat(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _CatOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor) (retVal *Tensor) { - - retVal, err := _CdistBackward(grad, x1, x2, p, cdist) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CholeskySolveHelper(a, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Coalesce(del bool) (retVal *Tensor) { - - retVal, err := ts._Coalesce(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Coalesced_(coalesced bool) { - - err := ts._Coalesced_(coalesced) - if err != nil { - log.Fatal(err) - } - - return -} - -func Must_ComputeLinearCombination(input *Tensor, coefficients *Tensor) (retVal *Tensor) { - - retVal, err := _ComputeLinearCombination(input, coefficients) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor) (retVal *Tensor) { - - retVal, err := _ComputeLinearCombinationOut(out, input, coefficients) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Conj(del bool) (retVal *Tensor) { - - retVal, err := ts._Conj(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ConjPhysical(del bool) (retVal *Tensor) { - - retVal, err := ts._ConjPhysical(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._ConvDepthwise2d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ConvDepthwise2dBackward(gradInput *Tensor, gradWeight *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._ConvDepthwise2dBackward(gradInput, gradWeight, gradOutput, weight, kernelSize, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._ConvDepthwise2dOut(out, weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ConvertIndicesFromCooToCsr(size int64, outInt32 
bool, del bool) (retVal *Tensor) { - - retVal, err := ts._ConvertIndicesFromCooToCsr(size, outInt32, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool) (retVal *Tensor) { - - retVal, err := ts._ConvertIndicesFromCooToCsrOut(out, size, outInt32, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool) (retVal *Tensor) { - - retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled, allowTf32) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor) { - - retVal, err := _ConvolutionDeprecated(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := _ConvolutionMode(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64) (retVal *Tensor) { - - retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CopyFrom(dst, nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CopyFromAndResize(dst *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._CopyFromAndResize(dst, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor) { - - retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _CudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank, deterministic, 
zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CudnnRnn(input *Tensor, weight []Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) { - - retVal0, retVal1, retVal2, retVal3, retVal4, err := _CudnnRnn(input, weight, weightStride0, weightBuf, hx, cx, mode, hiddenSize, projSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3, retVal4 -} - -func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor) { - - retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, batchFirst, bidirectional) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CufftGetPlanCacheMaxSize(deviceIndex int64) (retVal int64) { - - retVal, err := _CufftGetPlanCacheMaxSize(deviceIndex) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CufftGetPlanCacheSize(deviceIndex int64) (retVal int64) { - - retVal, err := _CufftGetPlanCacheSize(deviceIndex) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_DebugHasInternalOverlap(del bool) (retVal int64) { - - retVal, err := ts._DebugHasInternalOverlap(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_DetLuBasedHelper(del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts._DetLuBasedHelper(del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) Must_DetLuBasedHelperBackwardHelper(detGrad *Tensor, det *Tensor, lu *Tensor, pivs *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._DetLuBasedHelperBackwardHelper(detGrad, det, lu, pivs, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_DimArange(like *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _DimArange(like, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Dimi(del bool) (retVal int64) { - - retVal, err := ts._Dimi(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Dimv(del bool) (retVal int64) { - - retVal, err := ts._Dimv(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor) { - - retVal, err := _DirichletGrad(x, alpha, total) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { - - 
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3 -} - -func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor) { - - retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor) { - - retVal, err := _EmbeddingBagDenseBackward(grad, indices, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { - - retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBagForwardOnly(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3 -} - -func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64) (retVal *Tensor) { - - retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor) { - - retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64) (retVal *Tensor) { - - retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EuclideanDist(x1 *Tensor, x2 *Tensor) (retVal *Tensor) { - - retVal, err := _EuclideanDist(x1, x2) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, 
gradFactor float64, del bool) (retVal *Tensor) { - - retVal, err := ts._FakeQuantizeLearnablePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal *Tensor) { - - retVal, err := ts._FakeQuantizeLearnablePerTensorAffine(scale, zeroPoint, quantMin, quantMax, gradFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, gradFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) Must_FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._FakeQuantizePerTensorAffineCachemaskTensorQparams(scale, zeroPoint, fakeQuantEnabled, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_FftC2c(dim []int64, normalization int64, forward bool, del bool) (retVal *Tensor) { - - retVal, err := ts._FftC2c(dim, normalization, forward, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool) (retVal *Tensor) { - - retVal, err := ts._FftC2cOut(out, dim, normalization, forward, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool) (retVal *Tensor) { - - retVal, err := ts._FftC2r(dim, normalization, lastDimSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool) (retVal *Tensor) { - - retVal, err := ts._FftC2rOut(out, dim, normalization, lastDimSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FftR2c(dim []int64, normalization int64, onesided bool, del bool) (retVal *Tensor) { - - retVal, err := ts._FftR2c(dim, normalization, onesided, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool) (retVal *Tensor) { - - retVal, err := ts._FftR2cOut(out, dim, normalization, onesided, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FusedDropout(p float64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - 
- retVal0, retVal1, err := ts._FusedDropout(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._FusedMovingAvgObsFqHelper(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_FwPrimal(level int64, del bool) (retVal *Tensor) { - - retVal, err := ts._FwPrimal(level, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._GatherSparseBackward(dim, index, grad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := _GridSampler2dCpuFallback(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _GridSampler2dCpuFallbackBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_HasCompatibleShallowCopyType(from *Tensor, del bool) (retVal bool) { - - retVal, err := ts._HasCompatibleShallowCopyType(from, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor) { - - err := ts._IndexCopy_(dim, index, source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_Indices(del bool) (retVal *Tensor) { - - retVal, err := ts._Indices(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_InverseHelper(del bool) (retVal *Tensor) { - - retVal, err := ts._InverseHelper(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LinalgInvOutHelper_(infosLu *Tensor, infosGetri *Tensor) { - - err := ts._LinalgInvOutHelper_(infosLu, infosGetri) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_LinalgQrHelper(mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._LinalgQrHelper(mode, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._LogSoftmax(dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LogSoftmaxBackwardDataOut(out *Tensor, 
gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._LogSoftmaxBackwardDataOut(out, gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._LogSoftmaxOut(out, dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Logcumsumexp(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._Logcumsumexp(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._LogcumsumexpOut(out, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LuWithInfo(pivot bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts._LuWithInfo(pivot, checkErrors, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func Must_MakeDual(primal *Tensor, tangent *Tensor, level int64) (retVal *Tensor) { - - retVal, err := _MakeDual(primal, tangent, level) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool) (retVal *Tensor) { - - retVal, err := ts._MaskedScale(mask, scale, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MkldnnReshape(shape []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MkldnnReshape(shape, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MkldnnTranspose(dim0, dim1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64) { - - err := ts._MkldnnTranspose_(dim0, dim1) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_NegView(del bool) (retVal *Tensor) { - - retVal, err := ts._NegView(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_NnpackAvailable() (retVal bool) { - - retVal, err := _NnpackAvailable() - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64) (retVal *Tensor) { - - retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func 
Must_NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int64, gradOutput *Tensor, padding []int64) (retVal *Tensor) { - - retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Nnz(del bool) (retVal int64) { - - retVal, err := ts._Nnz(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _PackPaddedSequence(input, lengths, batchFirst) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool) (retVal *Tensor) { - - retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _PadPackedSequence(data, batchSizes, batchFirst, paddingValue, totalLength) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._PdistBackward(grad, p, pdist, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_PinMemory(device gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts._PinMemory(device, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._RemoveBatchDim(level, batchSize, outDim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ReshapeAlias(size []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._ReshapeAlias(size, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._ReshapeFromTensor(shape, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _RowwisePrune(weight, mask, compressedIndicesDtype) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_SWhere(condition *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._SWhere(condition, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SampleDirichlet(del bool) (retVal *Tensor) { - - retVal, err := ts._SampleDirichlet(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SaturateWeightToFp16(weight *Tensor) (retVal *Tensor) { - - retVal, err := _SaturateWeightToFp16(weight) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, axis int64) (retVal *Tensor) { - - retVal, err := _SegmentReduceBackward(grad, output, data, reduce, lengths, axis) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
Must_ShapeAsTensor(del bool) (retVal *Tensor) { - - retVal, err := ts._ShapeAsTensor(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _SobolEngineDraw(quasi, n, sobolstate, dimension, numGenerated, dtype) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64) { - - err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_SobolEngineInitializeState_(dimension int64) { - - err := ts._SobolEngineInitializeState_(dimension) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64) { - - err := ts._SobolEngineScramble_(ltm, dimension) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._Softmax(dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SoftmaxBackwardDataOut(gradInput, gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._SoftmaxOut(out, dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SolveHelper(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._SolveHelper(a, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_SparseAddmm(sparse *Tensor, dense *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseAddmm(sparse, dense, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCsrTensorUnsafe(crowIndices 
*Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCsrTensorUnsafe(crowIndices, colIndices, values, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseLogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseLogSoftmax(dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseLogSoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseLogSoftmaxInt(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseMaskHelper(t *Tensor, maskIndices *Tensor) (retVal *Tensor) { - - retVal, err := _SparseMaskHelper(t, maskIndices) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor) { - - retVal, err := _SparseMm(sparse, dense) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSoftmax(dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSoftmaxInt(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSparseMatmul(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSparseMatmul(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSum(del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSum(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSumBackward(grad, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSumDim(dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSumDim(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSumDimDtype(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSumDtype(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSumDtype(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_Stack(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _Stack(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_StackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err 
:= _StackOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_StandardGamma(del bool) (retVal *Tensor) { - - retVal, err := ts._StandardGamma(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._StandardGammaGrad(output, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SvdHelper(some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts._SvdHelper(some, computeUv, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) Must_SymeigHelper(eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._SymeigHelper(eigenvectors, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func Must_TestAmbiguousDefaults(dummy *Tensor, a int64, b int64) (retVal *Tensor) { - - retVal, err := _TestAmbiguousDefaults(dummy, a, b) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string) (retVal *Tensor) { - - retVal, err := _TestAmbiguousDefaultsB(dummy, a, b) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_TestOptionalFilledIntlist(values *Tensor, addends []int64) (retVal *Tensor) { - - retVal, err := _TestOptionalFilledIntlist(values, addends) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_TestOptionalIntlist(values *Tensor, addends []int64) (retVal *Tensor) { - - retVal, err := _TestOptionalIntlist(values, addends) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_TestSerializationSubcmul(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._TestSerializationSubcmul(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_TestStringDefault(dummy *Tensor, a string, b string) (retVal *Tensor) { - - retVal, err := _TestStringDefault(dummy, a, b) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._ToCopy(optionsKind, optionsDevice, nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor) { - - retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Unique(sorted bool, returnInverse bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts._Unique(sorted, returnInverse, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts._Unique2(sorted, returnInverse, returnCounts, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func Must_UnpackDual(dual *Tensor, level int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _UnpackDual(dual, level) - if err != nil { - 
log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) Must_UnsafeView(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._UnsafeView(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64) (retVal bool) { - - retVal, err := _UseCudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_UseCudnnRnnFlattenWeight() (retVal bool) { - - retVal, err := _UseCudnnRnnFlattenWeight() - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Values(del bool) (retVal *Tensor) { - - retVal, err := ts._Values(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Version(del bool) (retVal int64) { - - retVal, err := ts._Version(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_WeightNorm(v *Tensor, g *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _WeightNorm(v, g, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_WeightNormCudaInterface(v *Tensor, g *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _WeightNormCudaInterface(v, g, dim) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func Must_WeightNormCudaInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _WeightNormCudaInterfaceBackward(gradW, savedV, savedG, savedNorms, dim) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func Must_WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := _WeightNormDifferentiableBackward(gradW, savedV, savedG, savedNorms, dim) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAbs(del bool) (retVal *Tensor) { - - retVal, err := ts.Abs(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAbs_() { - - err := ts.Abs_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAbsOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AbsOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAbsolute(del bool) (retVal *Tensor) { - - retVal, err := ts.Absolute(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAbsolute_() { - - err := ts.Absolute_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAbsoluteOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AbsoluteOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAcos(del bool) (retVal *Tensor) { - - retVal, err := ts.Acos(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAcos_() { - - err := ts.Acos_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAcosOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AcosOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAcosh(del bool) (retVal *Tensor) { - - retVal, err := ts.Acosh(del) - if err != nil { - log.Fatal(err) - } - - 
return retVal -} - -func (ts *Tensor) MustAcosh_() { - - err := ts.Acosh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAcoshOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AcoshOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool1d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool2d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dBackward(gradInput, gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool1d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.AdaptiveMaxPool1d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAdaptiveMaxPool2d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.AdaptiveMaxPool2d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool2dBackwardGradInput(gradInput, gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.AdaptiveMaxPool2dOut(out, indices, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAdaptiveMaxPool3d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.AdaptiveMaxPool3d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
MustAdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool3dBackwardGradInput(gradInput, gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.AdaptiveMaxPool3dOut(out, indices, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAdd(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Add(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdd_(other *Tensor) { - - err := ts.Add_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.AddScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddScalar_(other *Scalar) { - - err := ts.AddScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addbmm(batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor) { - - err := ts.Addbmm_(batch1, batch2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddbmmOut(out, batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addcdiv(tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor) { - - err := ts.Addcdiv_(tensor1, tensor2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addcmul(tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor) { - - err := ts.Addcmul_(tensor1, tensor2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addmm(mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor) { - - err := ts.Addmm_(mat1, mat2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) 
MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddmmOut(out, mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addmv(mat, vec, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor) { - - err := ts.Addmv_(mat, vec) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddmvOut(out, mat, vec, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addr(vec1, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor) { - - err := ts.Addr_(vec1, vec2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddrOut(out, vec1, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := AffineGridGenerator(theta, size, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAlias(del bool) (retVal *Tensor) { - - retVal, err := ts.Alias(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAlignAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AlignAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAll(del bool) (retVal *Tensor) { - - retVal, err := ts.All(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAllAllOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AllAllOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAllDim(dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AllDim(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AllOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAllclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal bool) { - - retVal, err := ts.Allclose(other, rtol, atol, equalNan, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := AlphaDropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAlphaDropout_(p float64, train bool) { - - err := ts.AlphaDropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAmax(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Amax(dim, keepdim, del) - if err 
!= nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AmaxOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAmin(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Amin(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AminOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAminmax(dim []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Aminmax(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.AminmaxOut(min, max, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustAngle(del bool) (retVal *Tensor) { - - retVal, err := ts.Angle(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAngleOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AngleOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAny(del bool) (retVal *Tensor) { - - retVal, err := ts.Any(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAnyAllOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AnyAllOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAnyDim(dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AnyDim(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AnyOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Arange(end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArangeOut(out *Tensor, end *Scalar) (retVal *Tensor) { - - retVal, err := ArangeOut(out, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := ArangeStart(start, end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArangeStartOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor) { - - retVal, err := ArangeStartOut(out, start, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := ArangeStartStep(start, end, step, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArccos(del bool) (retVal *Tensor) { - - retVal, err := ts.Arccos(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
MustArccos_() { - - err := ts.Arccos_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustArccosOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ArccosOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArccosh(del bool) (retVal *Tensor) { - - retVal, err := ts.Arccosh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArccosh_() { - - err := ts.Arccosh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustArccoshOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ArccoshOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArcsin(del bool) (retVal *Tensor) { - - retVal, err := ts.Arcsin(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArcsin_() { - - err := ts.Arcsin_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustArcsinOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ArcsinOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArcsinh(del bool) (retVal *Tensor) { - - retVal, err := ts.Arcsinh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArcsinh_() { - - err := ts.Arcsinh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustArcsinhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ArcsinhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArctan(del bool) (retVal *Tensor) { - - retVal, err := ts.Arctan(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArctan_() { - - err := ts.Arctan_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustArctanOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ArctanOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArctanh(del bool) (retVal *Tensor) { - - retVal, err := ts.Arctanh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArctanh_() { - - err := ts.Arctanh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustArctanhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ArctanhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgmax(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Argmax(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.ArgmaxOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgmin(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Argmin(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.ArgminOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgsort(dim int64, descending bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Argsort(dim, descending, del) - if err != nil { - log.Fatal(err) - } - - return 
retVal -} - -func (ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AsStrided(size, stride, storageOffset, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset []int64) { - - err := ts.AsStrided_(size, stride, storageOffset) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAsin(del bool) (retVal *Tensor) { - - retVal, err := ts.Asin(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsin_() { - - err := ts.Asin_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAsinOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AsinOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsinh(del bool) (retVal *Tensor) { - - retVal, err := ts.Asinh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsinh_() { - - err := ts.Asinh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAsinhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AsinhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan(del bool) (retVal *Tensor) { - - retVal, err := ts.Atan(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan2(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Atan2(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan2_(other *Tensor) { - - err := ts.Atan2_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Atan2Out(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan_() { - - err := ts.Atan_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAtanOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AtanOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtanh(del bool) (retVal *Tensor) { - - retVal, err := ts.Atanh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtanh_() { - - err := ts.Atanh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAtanhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AtanhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtleast1d(del bool) (retVal *Tensor) { - - retVal, err := ts.Atleast1d(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtleast2d(del bool) (retVal *Tensor) { - - retVal, err := ts.Atleast2d(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtleast3d(del bool) (retVal *Tensor) { - - retVal, err := ts.Atleast3d(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2d(kernelSize 
[]int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Baddbmm(batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor) { - - err := ts.Baddbmm_(batch1, batch2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) 
{ - - retVal, err := ts.BaddbmmOut(out, batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BartlettWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor) { - - retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor) (retVal *Tensor) { - - retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu, count) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { - - retVal0, retVal1, retVal2, retVal3, err := BatchNormBackwardReduce(gradOut, input, mean, invstd, weight, inputG, weightG, biasG) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3 -} - -func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor) { - - retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor) { - - retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := BatchNormGatherStats(input, mean, invstd, runningMean, runningVar, momentum, eps, count) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustBatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := BatchNormGatherStatsWithCounts(input, mean, invstd, runningMean, runningVar, momentum, eps, counts) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustBatchNormStats(input *Tensor, eps float64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := BatchNormStats(input, eps) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustBatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64) (retVal0 *Tensor, retVal1 *Tensor) { - 
- retVal0, retVal1, err := BatchNormUpdateStats(input, runningMean, runningVar, momentum) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustBernoulli(del bool) (retVal *Tensor) { - - retVal, err := ts.Bernoulli(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBernoulli_(p *Tensor) { - - err := ts.Bernoulli_(p) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBernoulliFloat_(p float64) { - - err := ts.BernoulliFloat_(p) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBernoulliOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BernoulliOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBernoulliP(p float64, del bool) (retVal *Tensor) { - - retVal, err := ts.BernoulliP(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := Bilinear(input1, input2, weight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyBackwardGradInput(gradInput, gradOutput, target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Bincount(weights, minlength, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBinomial(count *Tensor, prob *Tensor) (retVal *Tensor) { - - retVal, err := Binomial(count, prob) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAnd(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAnd(other, del) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAnd_(other *Scalar) { - - err := ts.BitwiseAnd_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseAndScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAndScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAndTensor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAndTensor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAndTensor_(other *Tensor) { - - err := ts.BitwiseAndTensor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseAndTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAndTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseLeftShift(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseLeftShift(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseLeftShift_(other *Tensor) { - - err := ts.BitwiseLeftShift_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustBitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := BitwiseLeftShiftScalarTensor(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseLeftShiftTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseLeftShiftTensorScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseLeftShiftTensorScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseLeftShiftTensorScalar_(other *Scalar) { - - err := ts.BitwiseLeftShiftTensorScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseLeftShiftTensorScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseNot(del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseNot(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseNot_() { - - err := ts.BitwiseNot_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseNotOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOr(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOr(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOr_(other *Scalar) { - - err := ts.BitwiseOr_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseOrScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOrScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOrTensor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := 
ts.BitwiseOrTensor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOrTensor_(other *Tensor) { - - err := ts.BitwiseOrTensor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseOrTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOrTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseRightShift(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseRightShift(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseRightShift_(other *Tensor) { - - err := ts.BitwiseRightShift_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustBitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := BitwiseRightShiftScalarTensor(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseRightShiftTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseRightShiftTensorScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseRightShiftTensorScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseRightShiftTensorScalar_(other *Scalar) { - - err := ts.BitwiseRightShiftTensorScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseRightShiftTensorScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXor(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXor_(other *Scalar) { - - err := ts.BitwiseXor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseXorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXorScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXorTensor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXorTensor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXorTensor_(other *Tensor) { - - err := ts.BitwiseXorTensor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseXorTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXorTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BlackmanWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func MustBlockDiag(tensors []Tensor) (retVal *Tensor) { - - retVal, err := BlockDiag(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBmm(mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Bmm(mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BmmOut(out, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBroadcastTo(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BroadcastTo(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBucketize(boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Bucketize(boundaries, outInt32, right, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool) (retVal *Tensor) { - - retVal, err := BucketizeScalar(selfScalar, boundaries, outInt32, right) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor) { - - retVal, err := ts.BucketizeTensorOut(out, boundaries, outInt32, right, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCanCast(from gotch.DType, to gotch.DType) (retVal bool) { - - retVal, err := CanCast(from, to) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCartesianProd(tensors []Tensor) (retVal *Tensor) { - - retVal, err := CartesianProd(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCat(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Cat(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := CatOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCauchy_(median float64, sigma float64) { - - err := ts.Cauchy_(median, sigma) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustCdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64) (retVal *Tensor) { - - retVal, err := Cdist(x1, x2, p, computeMode) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCeil(del bool) (retVal *Tensor) { - - retVal, err := ts.Ceil(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCeil_() { - - err := ts.Ceil_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCeilOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CeilOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Celu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCelu_() { - - err := ts.Celu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustChainMatmul(matrices []Tensor) (retVal *Tensor) { - - retVal, err := ChainMatmul(matrices) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustChainMatmulOut(out *Tensor, matrices []Tensor) (retVal *Tensor) { - - retVal, err := ChainMatmulOut(out, matrices) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustChannelShuffle(groups int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ChannelShuffle(groups, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholesky(upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Cholesky(upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskyInverse(upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskyInverse(upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskyInverseOut(out, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskyOut(out, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskySolve(input2, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskySolveOut(out, input2, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ChooseQparamsOptimized(input, numel, nBins, ratio, bitWidth) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Clamp(min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClamp_(min *Scalar, max *Scalar) { - - err := ts.Clamp_(min, max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMax(max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMax(max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMax_(max *Scalar) { - - err := ts.ClampMax_(max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMaxOut(out, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMaxTensor(max *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMaxTensor(max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMaxTensor_(max *Tensor) { - - err := ts.ClampMaxTensor_(max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMaxTensorOut(out *Tensor, max *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMaxTensorOut(out, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMin(min *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMin(min, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMin_(min *Scalar) { - - err := ts.ClampMin_(min) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMinOut(out *Tensor, min *Scalar, del bool) (retVal 
*Tensor) { - - retVal, err := ts.ClampMinOut(out, min, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMinTensor(min *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMinTensor(min, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMinTensor_(min *Tensor) { - - err := ts.ClampMinTensor_(min) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMinTensorOut(out *Tensor, min *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMinTensorOut(out, min, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampOut(out, min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampTensor(min *Tensor, max *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampTensor(min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampTensor_(min *Tensor, max *Tensor) { - - err := ts.ClampTensor_(min, max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampTensorOut(out, min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClip(min *Scalar, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Clip(min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClip_(min *Scalar, max *Scalar) { - - err := ts.Clip_(min, max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClipOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClipOut(out, min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClipTensor(min *Tensor, max *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClipTensor(min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClipTensor_(min *Tensor, max *Tensor) { - - err := ts.ClipTensor_(min, max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ClipTensorOut(out, min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCoalesce(del bool) (retVal *Tensor) { - - retVal, err := ts.Coalesce(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCol2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCol2imBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := Col2imBackwardGradInput(gradInput, gradOutput, 
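// Go has no overloading, so LibTorch overloads are mangled into distinct names:
// the Scalar form keeps the base name (MustClamp, MustClampMin) while the
// Tensor-bound form gains a Tensor suffix (MustClampTensor, MustClampMinTensor).
// A sketch; lo/hi (*Scalar) and loT/hiT (*Tensor) are assumed prebuilt bounds:
func exampleClamp(x *Tensor, lo, hi *Scalar, loT, hiT *Tensor) (*Tensor, *Tensor) {
	a := x.MustClamp(lo, hi, false)         // clamp with scalar bounds
	b := x.MustClampTensor(loT, hiT, false) // clamp.Tensor overload, elementwise bounds
	return a, b
}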
kernelSize, dilation, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustColIndices(del bool) (retVal *Tensor) { - - retVal, err := ts.ColIndices(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustColumnStack(tensors []Tensor) (retVal *Tensor) { - - retVal, err := ColumnStack(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustColumnStackOut(out *Tensor, tensors []Tensor) (retVal *Tensor) { - - retVal, err := ColumnStackOut(out, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Combinations(r, withReplacement, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustComplex(real *Tensor, imag *Tensor) (retVal *Tensor) { - - retVal, err := Complex(real, imag) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustComplexOut(out *Tensor, real *Tensor, imag *Tensor) (retVal *Tensor) { - - retVal, err := ComplexOut(out, real, imag) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConcat(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Concat(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConcatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := ConcatOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConj(del bool) (retVal *Tensor) { - - retVal, err := ts.Conj(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConjPhysical(del bool) (retVal *Tensor) { - - retVal, err := ts.ConjPhysical(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConjPhysical_() { - - err := ts.ConjPhysical_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustConjPhysicalOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ConjPhysicalOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConstantPadNd(pad []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ConstantPadNd(pad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustContiguous(del bool) (retVal *Tensor) { - - retVal, err := ts.Contiguous(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv1dPadding(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, 
groups int64) (retVal *Tensor) { - - retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv2dPadding(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv3dPadding(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ConvDepthwise3d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConvDepthwise3dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.ConvDepthwise3dBackward(gradInput, gradWeight, gradBias, gradOutput, weight, kernelSize, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ConvTbc(weight, bias, pad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.ConvTbcBackward(input, weight, bias, pad, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { - - retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { - - retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { - - retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvolution(input *Tensor, weight *Tensor, 
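// The functional convolution wrappers mirror torch.nn.functional: input is
// NCHW, weight is OIHW, and stride/padding/dilation travel as int64 slices.
// A sketch with common unit values (the shapes and names are assumptions):
func exampleConv2d(input, weight, bias *Tensor) *Tensor {
	return MustConv2d(input, weight, bias,
		[]int64{1, 1}, // stride
		[]int64{1, 1}, // padding
		[]int64{1, 1}, // dilation
		1)             // groups
}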
bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor) { - - retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor) { - - retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool) { - - err := ts.CopySparseToSparse_(src, nonBlocking) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCopysign(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Copysign(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCopysign_(other *Tensor) { - - err := ts.Copysign_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCopysignOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CopysignOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCopysignScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.CopysignScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCopysignScalar_(other *Scalar) { - - err := ts.CopysignScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCopysignScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.CopysignScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCorrcoef(del bool) (retVal *Tensor) { - - retVal, err := ts.Corrcoef(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCos(del bool) (retVal *Tensor) { - - retVal, err := ts.Cos(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCos_() { - - err := ts.Cos_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCosOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CosOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCosh(del bool) (retVal *Tensor) { - - retVal, err := ts.Cosh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCosh_() { - - err := ts.Cosh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCoshOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CoshOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor) { - - retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *Tensor) { - - retVal, err := CosineSimilarity(x1, x2, dim, eps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCountNonzero(dim []int64, del bool) (retVal *Tensor) { 
- - retVal, err := ts.CountNonzero(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCountNonzeroDimIntlist(dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.CountNonzeroDimIntlist(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCov(correction int64, fweights *Tensor, aweights *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Cov(correction, fweights, aweights, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCross(other *Tensor, dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Cross(other, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool) (retVal *Tensor) { - - retVal, err := ts.CrossEntropyLoss(target, weight, reduction, ignoreIndex, labelSmoothing, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.CrossOut(out, other, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCrowIndices(del bool) (retVal *Tensor) { - - retVal, err := ts.CrowIndices(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor) { - - retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor) { - - retVal, err := CtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor) { - - retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor) { - - retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { - - retVal0, retVal1, retVal2, retVal3, err := CudnnBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3 -} - -func MustCudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := CudnnBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon, reserveSpace) - if err != nil { - log.Fatal(err) - } - - return 
retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionAddRelu(weight, z, alpha, bias, stride, padding, dilation, groups, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool) (retVal *Tensor) { - - retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionDeprecated(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionDeprecated(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionDeprecated2(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionDeprecated2(weight, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionRelu(weight, bias, stride, padding, dilation, groups, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool) (retVal *Tensor) { - - retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, 
stride, dilation, groups, benchmark, deterministic, allowTf32) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTransposeDeprecated(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTransposeDeprecated(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTransposeDeprecated2(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTransposeDeprecated2(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnGridSampler(grid, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.CudnnGridSamplerBackward(grid, gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustCudnnIsAcceptable(del bool) (retVal bool) { - - retVal, err := ts.CudnnIsAcceptable(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCummax(dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Cummax(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustCummaxOut(values *Tensor, indices *Tensor, dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.CummaxOut(values, indices, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustCummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := CummaxminBackward(grad, input, indices, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCummin(dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Cummin(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustCumminOut(values *Tensor, indices *Tensor, dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.CumminOut(values, indices, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Cumprod(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
MustCumprod_(dim int64, dtype gotch.DType) { - - err := ts.Cumprod_(dim, dtype) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustCumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor) (retVal *Tensor) { - - retVal, err := CumprodBackward(grad, input, dim, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.CumprodOut(out, dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Cumsum(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCumsum_(dim int64, dtype gotch.DType) { - - err := ts.Cumsum_(dim, dtype) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.CumsumOut(out, dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCumulativeTrapezoid(y *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := CumulativeTrapezoid(y, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := CumulativeTrapezoidX(y, x, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustData(del bool) (retVal *Tensor) { - - retVal, err := ts.Data(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDeg2rad(del bool) (retVal *Tensor) { - - retVal, err := ts.Deg2rad(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDeg2rad_() { - - err := ts.Deg2rad_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDeg2radOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Deg2radOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDenseDim(del bool) (retVal int64) { - - retVal, err := ts.DenseDim(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDequantize(del bool) (retVal *Tensor) { - - retVal, err := ts.Dequantize(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDet(del bool) (retVal *Tensor) { - - retVal, err := ts.Det(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDetach(del bool) (retVal *Tensor) { - - retVal, err := ts.Detach(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDetach_() { - - err := ts.Detach_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDiag(diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Diag(diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustDiagBackward(grad *Tensor, inputSizes []int64, diagonal int64) (retVal *Tensor) { - - retVal, err := DiagBackward(grad, inputSizes, diagonal) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool) 
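// Reductions that accept a dtype thread it straight through to LibTorch, so
// the accumulation type can differ from the input's. gotch.Float here is an
// assumed-suitable choice, not a value taken from the diff:
func exampleCumsum(x *Tensor) *Tensor {
	return x.MustCumsum(0, gotch.Float, false) // cumulative sum along dim 0
}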
(retVal *Tensor) { - - retVal, err := ts.DiagOut(out, diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagflat(offset int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Diagflat(offset, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Diagonal(offset, dim1, dim2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustDiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64) (retVal *Tensor) { - - retVal, err := DiagonalBackward(gradOutput, inputSizes, offset, dim1, dim2) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Diff(n, dim, prepend, append, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DiffOut(out, n, dim, prepend, append, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDigamma(del bool) (retVal *Tensor) { - - retVal, err := ts.Digamma(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDigamma_() { - - err := ts.Digamma_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDigammaOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DigammaOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDist(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Dist(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiv(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Div(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiv_(other *Tensor) { - - err := ts.Div_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DivOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool) (retVal *Tensor) { - - retVal, err := ts.DivOutMode(out, other, roundingMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.DivScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivScalar_(other *Scalar) { - - err := ts.DivScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivScalarMode(other *Scalar, roundingMode string, del bool) (retVal *Tensor) { - - retVal, err := ts.DivScalarMode(other, roundingMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivScalarMode_(other *Scalar, roundingMode string) { - - err := ts.DivScalarMode_(other, roundingMode) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivTensorMode(other *Tensor, roundingMode string, del bool) (retVal *Tensor) { - - retVal, err := ts.DivTensorMode(other, roundingMode, del) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivTensorMode_(other *Tensor, roundingMode string) { - - err := ts.DivTensorMode_(other, roundingMode) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivide(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Divide(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivide_(other *Tensor) { - - err := ts.Divide_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DivideOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool) (retVal *Tensor) { - - retVal, err := ts.DivideOutMode(out, other, roundingMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivideScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.DivideScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivideScalar_(other *Scalar) { - - err := ts.DivideScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivideScalarMode(other *Scalar, roundingMode string, del bool) (retVal *Tensor) { - - retVal, err := ts.DivideScalarMode(other, roundingMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivideScalarMode_(other *Scalar, roundingMode string) { - - err := ts.DivideScalarMode_(other, roundingMode) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivideTensorMode(other *Tensor, roundingMode string, del bool) (retVal *Tensor) { - - retVal, err := ts.DivideTensorMode(other, roundingMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDivideTensorMode_(other *Tensor, roundingMode string) { - - err := ts.DivideTensorMode_(other, roundingMode) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDot(tensor *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Dot(tensor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DotOut(out, tensor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := Dropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDropout_(p float64, train bool) { - - err := ts.Dropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustDstack(tensors []Tensor) (retVal *Tensor) { - - retVal, err := Dstack(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustDstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor) { - - retVal, err := DstackOut(out, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEig(eigenvectors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Eig(eigenvectors, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustEigE(e *Tensor, v *Tensor, eigenvectors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.EigE(e, v, 
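// The *Mode variants expose div's rounding_mode argument; LibTorch accepts
// "trunc" (round toward zero) or "floor" (floor division). A sketch:
func exampleFloorDiv(a, b *Tensor) *Tensor {
	return a.MustDivTensorMode(b, "floor", false) // elementwise floor(a / b)
}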
eigenvectors, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustEinsum(equation string, tensors []Tensor) (retVal *Tensor) { - - retVal, err := Einsum(equation, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustElu(del bool) (retVal *Tensor) { - - retVal, err := ts.Elu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustElu_() { - - err := ts.Elu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor) (retVal *Tensor) { - - retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, isResult, selfOrResult) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor) (retVal *Tensor) { - - retVal, err := EluBackwardGradInput(gradInput, gradOutput, alpha, scale, inputScale, isResult, selfOrResult) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEluOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.EluOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor) { - - retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor) { - - retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { - - retVal0, retVal1, retVal2, retVal3, err := EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3 -} - -func MustEmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { - - retVal0, retVal1, retVal2, retVal3, err := EmbeddingBagPaddingIdx(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2, retVal3 -} - -func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor) { - - retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64) { - - err := ts.EmbeddingRenorm_(indices, maxNorm, normType) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights 
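// Functional embedding lookup: weight is (numEmbeddings, embeddingDim) and
// indices holds int64 ids. The -1 paddingIdx (no padding row) and the two
// false flags (scaleGradByFreq, sparse) mirror the usual LibTorch defaults --
// assumptions here, not values taken from the diff:
func exampleEmbedding(weight, indices *Tensor) *Tensor {
	return MustEmbedding(weight, indices, -1, false, false)
}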
int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor) { - - retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Empty(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEmptyLike(del bool) (retVal *Tensor) { - - retVal, err := ts.EmptyLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmptyOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := EmptyOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := EmptyQuantized(size, qtensor, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEq(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Eq(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEq_(other *Scalar) { - - err := ts.Eq_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustEqScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.EqScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEqTensor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.EqTensor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEqTensor_(other *Tensor) { - - err := ts.EqTensor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustEqTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.EqTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEqual(other *Tensor, del bool) (retVal bool) { - - retVal, err := ts.Equal(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErf(del bool) (retVal *Tensor) { - - retVal, err := ts.Erf(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErf_() { - - err := ts.Erf_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustErfOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ErfOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfc(del bool) (retVal *Tensor) { - - retVal, err := ts.Erfc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfc_() { - - err := ts.Erfc_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustErfcOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ErfcOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfinv(del bool) (retVal *Tensor) { - - retVal, err := ts.Erfinv(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfinv_() { - 
- err := ts.Erfinv_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustErfinvOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ErfinvOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExp(del bool) (retVal *Tensor) { - - retVal, err := ts.Exp(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExp2(del bool) (retVal *Tensor) { - - retVal, err := ts.Exp2(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExp2_() { - - err := ts.Exp2_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustExp2Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Exp2Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExp_() { - - err := ts.Exp_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustExpOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ExpOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpand(size []int64, implicit bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Expand(size, implicit, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpandAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ExpandAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpm1(del bool) (retVal *Tensor) { - - retVal, err := ts.Expm1(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpm1_() { - - err := ts.Expm1_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustExpm1Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Expm1Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExponential_(lambd float64) { - - err := ts.Exponential_(lambd) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Eye(n, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := EyeM(n, m, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEyeMOut(out *Tensor, n int64, m int64) (retVal *Tensor) { - - retVal, err := EyeMOut(out, n, m) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEyeOut(out *Tensor, n int64) (retVal *Tensor) { - - retVal, err := EyeOut(out, n) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.FakeQuantizePerChannelAffineCachemask(scale, zeroPoint, axis, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func 
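// Factory wrappers replace LibTorch's TensorOptions with an explicit
// (DType, Device) pair at the end of the argument list; gotch.Float and
// gotch.CPU are the common defaults:
func exampleFactories() (*Tensor, *Tensor) {
	z := MustEmpty([]int64{2, 3}, gotch.Float, gotch.CPU)
	eye := MustEye(3, gotch.Float, gotch.CPU) // 3x3 identity
	return z, eye
}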
MustFakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor) (retVal *Tensor) { - - retVal, err := FakeQuantizePerChannelAffineCachemaskBackward(grad, mask) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.FakeQuantizePerTensorAffineCachemask(scale, zeroPoint, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustFakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor) (retVal *Tensor) { - - retVal, err := FakeQuantizePerTensorAffineCachemaskBackward(grad, mask) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerTensorAffineTensorQparams(scale, zeroPoint, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmPackGemmMatrixFp16(input *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmPackGemmMatrixFp16(input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmPackQuantizedMatrix(input *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmPackQuantizedMatrix(input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64) (retVal *Tensor) { - - retVal, err := FbgemmPackQuantizedMatrixKn(input, k, n) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFeatureAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := FeatureAlphaDropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool) { - - err := 
ts.FeatureAlphaDropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustFeatureDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := FeatureDropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFeatureDropout_(p float64, train bool) { - - err := ts.FeatureDropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFftFft(n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFft(n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftFft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFft2(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFft2Out(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFftOut(out, n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := FftFftfreq(n, d, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFftFftfreqOut(out *Tensor, n int64, d float64) (retVal *Tensor) { - - retVal, err := FftFftfreqOut(out, n, d) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftFftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFftn(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFftnOut(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftFftshift(dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FftFftshift(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftHfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftHfft(n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftHfftOut(out, n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIfft(n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIfft2(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIfft2Out(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor) 
{ - - retVal, err := ts.FftIfftOut(out, n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIfftn(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIfftnOut(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIfftshift(dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIfftshift(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIhfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIhfft(n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIhfftOut(out, n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIrfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIrfft(n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIrfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIrfft2(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIrfft2Out(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIrfftOut(out, n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIrfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIrfftn(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftIrfftnOut(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftRfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftRfft(n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftRfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftRfft2(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftRfft2Out(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftRfftOut(out, n, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := FftRfftfreq(n, 
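// The torch.fft namespace is flattened into an Fft prefix. The optional length
// n travels as an []int64 (assumed here that nil leaves the input size
// unchanged, matching the binding's other optional-int parameters), and norm
// takes the LibTorch strings ("forward", "backward", "ortho"):
func exampleFft(x *Tensor) *Tensor {
	return x.MustFftFft(nil, -1, "ortho", false) // FFT over the last dimension
}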
d, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFftRfftfreqOut(out *Tensor, n int64, d float64) (retVal *Tensor) { - - retVal, err := FftRfftfreqOut(out, n, d) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftRfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftRfftn(s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor) { - - retVal, err := ts.FftRfftnOut(out, s, dim, norm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFill_(value *Scalar) { - - err := ts.Fill_(value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool) { - - err := ts.FillDiagonal_(fillValue, wrap) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFillTensor_(value *Tensor) { - - err := ts.FillTensor_(value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFix(del bool) (retVal *Tensor) { - - retVal, err := ts.Fix(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFix_() { - - err := ts.Fix_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFixOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FixOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Flatten(startDim, endDim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFlattenDenseTensors(tensors []Tensor) (retVal *Tensor) { - - retVal, err := FlattenDenseTensors(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFlip(dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Flip(dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFliplr(del bool) (retVal *Tensor) { - - retVal, err := ts.Fliplr(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFlipud(del bool) (retVal *Tensor) { - - retVal, err := ts.Flipud(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloatPower(exponent *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FloatPower(exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloatPower_(exponent *Scalar) { - - err := ts.FloatPower_(exponent) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustFloatPowerScalar(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { - - retVal, err := FloatPowerScalar(selfScalar, exponent) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { - - retVal, err := FloatPowerScalarOut(out, selfScalar, exponent) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloatPowerTensor_(exponent *Tensor) { - - err := ts.FloatPowerTensor_(exponent) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFloatPowerTensorScalar(exponent *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.FloatPowerTensorScalar(exponent, del) - if err != nil { - 
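All of the generated Must* wrappers removed in this hunk share one mechanical pattern: call the error-returning method of the same name from ts/tensor-generated.go, log.Fatal on failure, and return the bare value. A minimal sketch of that relationship, assuming a hypothetical tensor x created elsewhere (x is not part of this patch):

	// Error-returning form, as generated in ts/tensor-generated.go:
	flat, err := x.Flatten(0, -1, false)
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent fatal-on-error form from the wrappers in this hunk:
	flat = x.MustFlatten(0, -1, false)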
-func (ts *Tensor) MustFloatPowerTensorScalar(exponent *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloatPowerTensorScalar(exponent, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloatPowerTensorScalarOut(out, exponent, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloatPowerTensorTensorOut(out, exponent, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloor(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Floor(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloor_() {
-
-	err := ts.Floor_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustFloorDivide(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloorDivide(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloorDivide_(other *Tensor) {
-
-	err := ts.FloorDivide_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustFloorDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloorDivideOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloorDivideScalar(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloorDivideScalar(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFloorDivideScalar_(other *Scalar) {
-
-	err := ts.FloorDivideScalar_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustFloorOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FloorOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmax(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Fmax(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FmaxOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmin(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Fmin(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFminOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FminOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmod(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Fmod(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmod_(other *Scalar) {
-
-	err := ts.Fmod_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustFmodScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FmodScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmodTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FmodTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFmodTensor_(other *Tensor) {
-
-	err := ts.FmodTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustFmodTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FmodTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFrac(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Frac(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFrac_() {
-
-	err := ts.Frac_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustFracOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FracOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.FractionalMaxPool2d(kernelSize, outputSize, randomSamples, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FractionalMaxPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.FractionalMaxPool2dOutput(output, indices, kernelSize, outputSize, randomSamples, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustFractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.FractionalMaxPool3d(kernelSize, outputSize, randomSamples, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FractionalMaxPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.FractionalMaxPool3dOutput(output, indices, kernelSize, outputSize, randomSamples, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustFrexp(del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.Frexp(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustFrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.FrexpTensorOut(mantissa, exponent, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustFrobeniusNorm(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FrobeniusNorm(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFrobeniusNormDim(dim []int64, keepdim bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FrobeniusNormDim(dim, keepdim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustFromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := Full(size, fillValue, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFullLike(fillValue *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FullLike(fillValue, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustFullOut(out *Tensor, size []int64, fillValue *Scalar) (retVal *Tensor) {
-
-	retVal, err := FullOut(out, size, fillValue)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustFusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.FusedMovingAvgObsFakeQuant(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGather(dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Gather(dim, index, sparseGrad, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GatherBackward(grad, dim, index, sparseGrad, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGcd(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Gcd(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGcd_(other *Tensor) {
-
-	err := ts.Gcd_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGcdOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GcdOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGe(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Ge(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGe_(other *Scalar) {
-
-	err := ts.Ge_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GeScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGeTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GeTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGeTensor_(other *Tensor) {
-
-	err := ts.GeTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GeTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGelu(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Gelu(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGeluBackward(grad *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GeluBackward(grad, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGeluBackwardGradInput(gradInput *Tensor, grad *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GeluBackwardGradInput(gradInput, grad, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGeluOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GeluOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGeometric_(p float64) {
-
-	err := ts.Geometric_(p)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGeqrf(del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.Geqrf(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustGeqrfA(a *Tensor, tau *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.GeqrfA(a, tau, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustGer(vec2 *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Ger(vec2, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GerOut(out, vec2, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGlu(dim int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Glu(dim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGluBackward(gradOutput *Tensor, dim int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GluBackward(gradOutput, dim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GluBackwardGradInput(gradInput, gradOutput, dim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GluOut(out, dim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGrad(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Grad(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreater(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Greater(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreater_(other *Scalar) {
-
-	err := ts.Greater_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGreaterEqual(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterEqual(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreaterEqual_(other *Scalar) {
-
-	err := ts.GreaterEqual_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGreaterEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterEqualScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreaterEqualTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterEqualTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreaterEqualTensor_(other *Tensor) {
-
-	err := ts.GreaterEqualTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGreaterEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterEqualTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreaterScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreaterTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGreaterTensor_(other *Tensor) {
-
-	err := ts.GreaterTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGreaterTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GreaterTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) {
-
-	retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) {
-
-	retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustGridSampler2dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := GridSampler2dBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) {
-
-	retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustGridSampler3dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := GridSampler3dBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool) (retVal *Tensor) {
-
-	retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustGru(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := Gru(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) {
-
-	retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustGruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := GruData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustGt(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Gt(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGt_(other *Scalar) {
-
-	err := ts.Gt_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGtScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GtScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGtTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GtTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustGtTensor_(other *Tensor) {
-
-	err := ts.GtTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustGtTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.GtTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := HammingWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := HammingWindowPeriodicAlpha(windowLength, periodic, alpha, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := HammingWindowPeriodicAlphaBeta(windowLength, periodic, alpha, beta, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := HannWindow(windowLength, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := HannWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardshrink(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Hardshrink(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardshrinkBackward(gradOut, lambd, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardshrinkBackwardGradInput(gradInput, gradOut, lambd, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardshrinkOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardshrinkOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardsigmoid(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Hardsigmoid(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardsigmoid_() {
-
-	err := ts.Hardsigmoid_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardsigmoidBackward(gradOutput, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardsigmoidBackwardGradInput(gradInput, gradOutput, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardsigmoidOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardswish(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Hardswish(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardswish_() {
-
-	err := ts.Hardswish_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustHardswishBackward(gradOutput *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardswishBackward(gradOutput, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardswishOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardswishOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardtanh(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Hardtanh(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardtanh_() {
-
-	err := ts.Hardtanh_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardtanhBackwardGradInput(gradInput, gradOutput, minVal, maxVal, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHardtanhOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HardtanhOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHeaviside(values *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Heaviside(values, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHeaviside_(values *Tensor) {
-
-	err := ts.Heaviside_(values)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustHeavisideOut(out *Tensor, values *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HeavisideOut(out, values, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHistc(bins int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Histc(bins, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HistcOut(out, bins, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHspmm(mat1 *Tensor, mat2 *Tensor) (retVal *Tensor) {
-
-	retVal, err := Hspmm(mat1, mat2)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor) {
-
-	retVal, err := HspmmOut(out, mat1, mat2)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHstack(tensors []Tensor) (retVal *Tensor) {
-
-	retVal, err := Hstack(tensors)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustHstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor) {
-
-	retVal, err := HstackOut(out, tensors)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHuberLoss(target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HuberLoss(target, reduction, delta, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HuberLossBackward(gradOutput, target, reduction, delta, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HuberLossBackwardOut(gradInput, gradOutput, target, reduction, delta, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
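Note that every method variant above threads a trailing del flag through to the underlying call; in gotch this flag, when true, frees the receiver's C-allocated storage once the operation returns, which is how chained temporaries avoid leaking Libtorch memory. A short sketch under that assumption, with x a hypothetical input tensor:

	// Hypothetical chaining sketch: del=true releases each temporary.
	y := x.MustHardtanh(false)   // x stays alive for later use
	z := y.MustHardsigmoid(true) // y is freed after this call
	_ = z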
-func (ts *Tensor) MustHuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HuberLossOut(out, target, reduction, delta, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHypot(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Hypot(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustHypot_(other *Tensor) {
-
-	err := ts.Hypot_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustHypotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.HypotOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustI0(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.I0(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustI0_() {
-
-	err := ts.I0_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustI0Out(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.I0Out(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIgamma(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Igamma(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIgamma_(other *Tensor) {
-
-	err := ts.Igamma_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIgammaOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IgammaOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIgammac(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Igammac(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIgammac_(other *Tensor) {
-
-	err := ts.Igammac_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIgammacOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IgammacOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIm2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) {
-
-	retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIm2colBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) {
-
-	retVal, err := Im2colBackwardGradInput(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustImag(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Imag(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexAdd(dim, index, source, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor) {
-
-	err := ts.IndexAdd_(dim, index, source)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIndexAddAlpha(dim int64, index *Tensor, source *Tensor, alpha *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexAddAlpha(dim, index, source, alpha, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexAddAlpha_(dim int64, index *Tensor, source *Tensor, alpha *Scalar) {
-
-	err := ts.IndexAddAlpha_(dim, index, source, alpha)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexCopy(dim, index, source, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor) {
-
-	err := ts.IndexCopy_(dim, index, source)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexFill(dim, index, value, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar) {
-
-	err := ts.IndexFill_(dim, index, value)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexFillIntTensor(dim, index, value, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexFillIntTensor_(dim int64, index *Tensor, value *Tensor) {
-
-	err := ts.IndexFillIntTensor_(dim, index, value)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexSelect(dim, index, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor) (retVal *Tensor) {
-
-	retVal, err := IndexSelectBackward(grad, selfSizes, dim, index)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IndexSelectOut(out, dim, index, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIndices(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Indices(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustInfinitelyDifferentiableGeluBackward(grad *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.InfinitelyDifferentiableGeluBackward(grad, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustInner(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Inner(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustInnerOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.InnerOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustInstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor) {
-
-	retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIntRepr(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IntRepr(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustInverse(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Inverse(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustInverseOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.InverseOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsCoalesced(del bool) (retVal bool) {
-
-	retVal, err := ts.IsCoalesced(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsComplex(del bool) (retVal bool) {
-
-	retVal, err := ts.IsComplex(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsConj(del bool) (retVal bool) {
-
-	retVal, err := ts.IsConj(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsDistributed(del bool) (retVal bool) {
-
-	retVal, err := ts.IsDistributed(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsFloatingPoint(del bool) (retVal bool) {
-
-	retVal, err := ts.IsFloatingPoint(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsInference(del bool) (retVal bool) {
-
-	retVal, err := ts.IsInference(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsLeaf(del bool) (retVal bool) {
-
-	retVal, err := ts.IsLeaf(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsNeg(del bool) (retVal bool) {
-
-	retVal, err := ts.IsNeg(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsNonzero(del bool) (retVal bool) {
-
-	retVal, err := ts.IsNonzero(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsPinned(device gotch.Device, del bool) (retVal bool) {
-
-	retVal, err := ts.IsPinned(device, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsSameSize(other *Tensor, del bool) (retVal bool) {
-
-	retVal, err := ts.IsSameSize(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsSetTo(tensor *Tensor, del bool) (retVal bool) {
-
-	retVal, err := ts.IsSetTo(tensor, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsSigned(del bool) (retVal bool) {
-
-	retVal, err := ts.IsSigned(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsVulkanAvailable() (retVal bool) {
-
-	retVal, err := IsVulkanAvailable()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isclose(other, rtol, atol, equalNan, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsfinite(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isfinite(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor) {
-
-	retVal, err := Isin(elements, testElements, assumeUnique, invert)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor) {
-
-	retVal, err := IsinScalarTensor(element, testElements, assumeUnique, invert)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor) {
-
-	retVal, err := IsinScalarTensorOut(out, element, testElements, assumeUnique, invert)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool) (retVal *Tensor) {
-
-	retVal, err := IsinTensorScalar(elements, testElement, assumeUnique, invert)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool) (retVal *Tensor) {
-
-	retVal, err := IsinTensorScalarOut(out, elements, testElement, assumeUnique, invert)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustIsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor) {
-
-	retVal, err := IsinTensorTensorOut(out, elements, testElements, assumeUnique, invert)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsinf(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isinf(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsnan(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isnan(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsneginf(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isneginf(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsneginfOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IsneginfOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsposinf(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isposinf(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsposinfOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.IsposinfOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIsreal(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Isreal(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustIstft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Istft(nFft, hopLength, winLength, window, center, normalized, onesided, length, returnComplex, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustKaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := KaiserWindow(windowLength, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustKaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := KaiserWindowBeta(windowLength, periodic, beta, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustKaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) {
-
-	retVal, err := KaiserWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustKlDiv(target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.KlDiv(target, reduction, logTarget, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustKlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.KlDivBackward(gradOutput, target, reduction, logTarget, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustKron(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Kron(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustKronOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.KronOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustKthvalue(k int64, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.Kthvalue(k, dim, keepdim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustKthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.KthvalueValues(values, indices, k, dim, keepdim, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.L1Loss(target, reduction, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.L1LossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.L1LossOut(out, target, reduction, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool) (retVal *Tensor) {
-
-	retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLcm(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Lcm(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLcm_(other *Tensor) {
-
-	err := ts.Lcm_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
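The package-level factory wrappers above (HammingWindow, HannWindow, KaiserWindow, and friends) take an explicit optionsKind/optionsDevice pair in place of Libtorch's TensorOptions. A minimal sketch, assuming gotch.Float and gotch.CPU as the library's dtype and device values:

	// Hypothetical factory-call sketch using a wrapper removed above.
	w := MustKaiserWindow(128, gotch.Float, gotch.CPU)
	defer w.MustDrop() // assumed: MustDrop frees the underlying C tensor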
-func (ts *Tensor) MustLcmOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LcmOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLdexp(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Ldexp(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLdexp_(other *Tensor) {
-
-	err := ts.Ldexp_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLdexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LdexpOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLe(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Le(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLe_(other *Scalar) {
-
-	err := ts.Le_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLeTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLeTensor_(other *Tensor) {
-
-	err := ts.LeTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLeakyRelu(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeakyRelu(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLeakyRelu_() {
-
-	err := ts.LeakyRelu_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeakyReluBackwardGradInput(gradInput, gradOutput, negativeSlope, selfIsResult, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLeakyReluOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LeakyReluOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Lerp(end, weight, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLerp_(end *Tensor, weight *Scalar) {
-
-	err := ts.Lerp_(end, weight)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LerpScalarOut(out, end, weight, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLerpTensor(end *Tensor, weight *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LerpTensor(end, weight, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLerpTensor_(end *Tensor, weight *Tensor) {
-
-	err := ts.LerpTensor_(end, weight)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LerpTensorOut(out, end, weight, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLess(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Less(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLess_(other *Scalar) {
-
-	err := ts.Less_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLessEqual(other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessEqual(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLessEqual_(other *Scalar) {
-
-	err := ts.LessEqual_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLessEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessEqualScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLessEqualTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessEqualTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLessEqualTensor_(other *Tensor) {
-
-	err := ts.LessEqualTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLessEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessEqualTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLessScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessScalarOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLessTensor(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessTensor(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLessTensor_(other *Tensor) {
-
-	err := ts.LessTensor_(other)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLessTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LessTensorOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLgamma(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.Lgamma(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLgamma_() {
-
-	err := ts.Lgamma_()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return
-}
-
-func (ts *Tensor) MustLgammaOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LgammaOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgCholesky(upper bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgCholesky(upper, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgCholeskyEx(upper bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgCholeskyEx(upper, checkErrors, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgCholeskyExL(l, info, upper, checkErrors, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgCholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgCholeskyOut(out, upper, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgCond(p *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgCond(p, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgCondOut(out *Tensor, p *Scalar, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgCondOut(out, p, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgCondPStr(p string, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgCondPStr(p, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgCondPStrOut(out *Tensor, p string, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgCondPStrOut(out, p, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgDet(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgDet(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgDetOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgDetOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgEig(del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgEig(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgEigOut(eigenvalues, eigenvectors, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgEigh(uPLO string, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgEigh(uPLO, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgEighEigvals(eigvals, eigvecs, uPLO, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgEigvals(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgEigvals(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgEigvalsOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgEigvalsOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgEigvalsh(uPLO string, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgEigvalsh(uPLO, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgEigvalshOut(out *Tensor, uPLO string, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgEigvalshOut(out, uPLO, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustLinalgHouseholderProduct(input *Tensor, tau *Tensor) (retVal *Tensor) {
-
-	retVal, err := LinalgHouseholderProduct(input, tau)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustLinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor) (retVal *Tensor) {
-
-	retVal, err := LinalgHouseholderProductOut(out, input, tau)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgInv(del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgInv(del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgInvEx(checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgInvEx(checkErrors, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgInvExInverse(inverse *Tensor, info *Tensor, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) {
-
-	retVal0, retVal1, err := ts.LinalgInvExInverse(inverse, info, checkErrors, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1
-}
-
-func (ts *Tensor) MustLinalgInvOut(out *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgInvOut(out, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgLstsq(b *Tensor, rcond []float64, driver string, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
-
-	retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsq(b, rcond, driver, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1, retVal2, retVal3
-}
-
-func (ts *Tensor) MustLinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
-
-	retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsqOut(solution, residuals, rank, singularValues, b, rcond, driver, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal0, retVal1, retVal2, retVal3
-}
-
-func (ts *Tensor) MustLinalgMatmul(other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgMatmul(other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgMatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgMatmulOut(out, other, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgMatrixPower(n int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgMatrixPower(n, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgMatrixPowerOut(out *Tensor, n int64, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgMatrixPowerOut(out, n, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgMatrixRank(tol []float64, hermitian bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgMatrixRank(tol, hermitian, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func (ts *Tensor) MustLinalgMatrixRankOut(out *Tensor, tol []float64, hermitian bool, del bool) (retVal *Tensor) {
-
-	retVal, err := ts.LinalgMatrixRankOut(out, tol, hermitian, del)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustLinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool) (retVal *Tensor) {
-
-	retVal, err := LinalgMatrixRankOutTolTensor(out, input, tol, hermitian)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustLinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool) (retVal *Tensor) {
-
-	retVal, err := LinalgMatrixRankTolTensor(input, tol, hermitian)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return retVal
-}
-
-func MustLinalgMultiDot(tensors []Tensor) (retVal *Tensor)
{ - - retVal, err := LinalgMultiDot(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinalgMultiDotOut(out *Tensor, tensors []Tensor) (retVal *Tensor) { - - retVal, err := LinalgMultiDotOut(out, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgNorm(ord, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgNormOrdStr(ord, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgNormOrdStrOut(out, ord, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgNormOut(out, ord, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgPinv(rcond float64, hermitian bool, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgPinv(rcond, hermitian, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgPinvOut(out, rcond, hermitian, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgPinvOutRcondTensor(out, rcond, hermitian, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgPinvRcondTensor(rcond, hermitian, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgQr(mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.LinalgQr(mode, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustLinalgQrOut(q *Tensor, r *Tensor, mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.LinalgQrOut(q, r, mode, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustLinalgSlogdet(del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.LinalgSlogdet(del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustLinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.LinalgSlogdetOut(sign, logabsdet, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustLinalgSolve(input *Tensor, other *Tensor) (retVal *Tensor) { - - retVal, err := LinalgSolve(input, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinalgSolveOut(out *Tensor, input *Tensor, other *Tensor) (retVal *Tensor) { - - retVal, err := LinalgSolveOut(out, input, other) 
- if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgSvd(fullMatrices bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.LinalgSvd(fullMatrices, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustLinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, fullMatrices bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.LinalgSvdU(u, s, vh, fullMatrices, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func MustLinalgSvdvals(input *Tensor) (retVal *Tensor) { - - retVal, err := LinalgSvdvals(input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinalgSvdvalsOut(out *Tensor, input *Tensor) (retVal *Tensor) { - - retVal, err := LinalgSvdvalsOut(out, input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgTensorinv(ind int64, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgTensorinv(ind, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgTensorinvOut(out *Tensor, ind int64, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgTensorinvOut(out, ind, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgTensorsolve(other *Tensor, dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgTensorsolve(other, dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.LinalgTensorsolveOut(out, other, dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := Linear(input, weight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := LinearOut(out, input, weight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinspace(start *Scalar, end *Scalar, steps []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64) (retVal *Tensor) { - - retVal, err := LinspaceOut(out, start, end, steps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog(del bool) (retVal *Tensor) { - - retVal, err := ts.Log(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog10(del bool) (retVal *Tensor) { - - retVal, err := ts.Log10(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog10_() { - - err := ts.Log10_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLog10Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Log10Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog1p(del bool) (retVal *Tensor) { - - retVal, err := ts.Log1p(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog1p_() { - - err := ts.Log1p_() - if err != 
nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLog1pOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Log1pOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog2(del bool) (retVal *Tensor) { - - retVal, err := ts.Log2(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog2_() { - - err := ts.Log2_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLog2Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Log2Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog_() { - - err := ts.Log_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogNormal_(mean float64, std float64) { - - err := ts.LogNormal_(mean, std) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoid(del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoid(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoidBackwardGradInput(gradInput, gradOutput, buffer, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoidOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSoftmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogaddexp(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Logaddexp(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogaddexp2(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Logaddexp2(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogaddexp2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Logaddexp2Out(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogaddexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogaddexpOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogcumsumexp(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Logcumsumexp(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.LogcumsumexpOut(out, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogdet(del bool) (retVal *Tensor) { - - retVal, err := ts.Logdet(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalAnd(other *Tensor, del bool) 
(retVal *Tensor) { - - retVal, err := ts.LogicalAnd(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalAnd_(other *Tensor) { - - err := ts.LogicalAnd_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalAndOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalNot(del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalNot(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalNot_() { - - err := ts.LogicalNot_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalNotOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalNotOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalOr(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalOr(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalOr_(other *Tensor) { - - err := ts.LogicalOr_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalOrOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalXor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalXor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalXor_(other *Tensor) { - - err := ts.LogicalXor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalXorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogit(eps []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.Logit(eps, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogit_(eps []float64) { - - err := ts.Logit_(eps) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogitBackward(gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.LogitBackward(gradOutput, eps, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.LogitBackwardGradInput(gradInput, gradOutput, eps, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.LogitOut(out, eps, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLogspace(start *Scalar, end *Scalar, steps []int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64, base float64) (retVal *Tensor) { - - retVal, err := LogspaceOut(out, start, end, steps, base) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
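Outside diff formatting, the relationship between each fallible method and its Must* twin in these generated files is easier to see. A minimal user-side sketch, assuming gotch's ts package lives at github.com/sugarme/gotch/ts and taking the tensor and scalar arguments as given (no constructors from this patch are shown):

package example

import "github.com/sugarme/gotch/ts"

// lessEqualChecked uses the fallible API: the caller owns error handling.
func lessEqualChecked(x *ts.Tensor, threshold *ts.Scalar) (*ts.Tensor, error) {
	return x.LessEqual(threshold, false)
}

// lessEqualMust uses the generated Must wrapper: the identical call, but
// any non-nil error is routed to log.Fatal inside the wrapper, so the
// process exits instead of an error value reaching the caller.
func lessEqualMust(x *ts.Tensor, threshold *ts.Scalar) *ts.Tensor {
	return x.MustLessEqual(threshold, false)
}

The trailing false is the del flag, which every wrapper forwards unchanged; in gotch's convention it controls whether the receiver tensor is freed after the call (an assumption noted here, not something this patch states).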
[Removed hunk continues, same generated pattern: MustLogsumexp, MustLogsumexpOut, MustLstm, MustLstmCell, MustLstmData, MustLstsq, MustLstsqX, MustLt, MustLt_, MustLtScalarOut, MustLtTensor, MustLtTensor_, MustLtTensorOut, MustLuSolve, MustLuSolveOut, MustLuUnpack, MustLuUnpackOut, MustMarginRankingLoss, MustMaskedFill, MustMaskedFill_, MustMaskedFillTensor, MustMaskedFillTensor_, MustMaskedScatter, MustMaskedScatter_, MustMaskedSelect, MustMaskedSelectBackward, MustMaskedSelectOut, MustMatmul, MustMatmulOut, MustMatrixExp, MustMatrixExpBackward, MustMatrixPower, MustMatrixPowerOut, MustMatrixRank, MustMatrixRankTol, MustMax, MustMaxDim, MustMaxDimMax, MustMaxOther, MustMaxOut, MustMaxPool1d, MustMaxPool1dWithIndices, MustMaxPool2d, MustMaxPool2dWithIndices, MustMaxPool2dWithIndicesBackward, MustMaxPool2dWithIndicesBackwardGradInput, MustMaxPool2dWithIndicesOut, MustMaxPool3d, MustMaxPool3dWithIndices, MustMaxPool3dWithIndicesBackward, MustMaxPool3dWithIndicesBackwardGradInput, MustMaxPool3dWithIndicesOut, MustMaxUnpool2d, MustMaxUnpool2dBackward, MustMaxUnpool2dBackwardGradInput, MustMaxUnpool2dOut, MustMaxUnpool3d, MustMaxUnpool3dBackward, MustMaxUnpool3dBackwardGradInput, MustMaxUnpool3dOut, MustMaximum, MustMaximumOut, MustMean, MustMeanDim, MustMeanOut, MustMedian, MustMedianDim, MustMedianDimValues, MustMin, MustMinDim, MustMinDimMin, MustMinOther, MustMinOut, MustMinimum, MustMinimumOut, MustMiopenBatchNorm, MustMiopenBatchNormBackward, MustMiopenConvolution, MustMiopenConvolutionBackwardBias, MustMiopenConvolutionBackwardInput, MustMiopenConvolutionBackwardWeight, MustMiopenConvolutionTranspose, MustMiopenConvolutionTransposeBackwardInput, MustMiopenConvolutionTransposeBackwardWeight, MustMiopenDepthwiseConvolution, MustMiopenDepthwiseConvolutionBackwardInput, MustMiopenDepthwiseConvolutionBackwardWeight, MustMiopenRnn, MustMish, MustMish_, MustMishBackward, MustMishOut.]
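The hunk interleaves three generated shapes per operation where Libtorch offers them: a value-returning form, an in-place form ending in an underscore whose Must wrapper returns nothing, and an Out form that writes into a caller-supplied tensor. A sketch of the same op in all three shapes, with x and out assumed pre-allocated:

package example

import "github.com/sugarme/gotch/ts"

// logThreeWays exercises the three shapes with Log, whose removed 1.10
// wrappers appear earlier in this hunk. The false arguments are the del
// flag, forwarded unchanged by the wrappers.
func logThreeWays(x, out *ts.Tensor) (*ts.Tensor, *ts.Tensor) {
	y := x.MustLog(false)         // value-returning: allocates a new result
	x.MustLog_()                  // in-place: mutates x, returns nothing
	r := x.MustLogOut(out, false) // Out form: writes the result into out
	return y, r
}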
[Removed hunk continues, same generated pattern: MustMkldnnAdaptiveAvgPool2d, MustMkldnnAdaptiveAvgPool2dBackward, MustMkldnnConvolution, MustMkldnnConvolutionBackwardInput, MustMkldnnConvolutionBackwardWeights, MustMkldnnLinear, MustMkldnnLinearBackwardInput, MustMkldnnLinearBackwardWeights, MustMkldnnMaxPool2d, MustMkldnnMaxPool2dBackward, MustMkldnnMaxPool3d, MustMkldnnMaxPool3dBackward, MustMkldnnReorderConv2dWeight, MustMkldnnReorderConv3dWeight, MustMm, MustMmOut, MustMode, MustModeValues, MustMoveaxis, MustMoveaxisInt, MustMovedim, MustMovedimInt, MustMseLoss, MustMseLossBackward, MustMseLossBackwardGradInput, MustMseLossOut, MustMsort, MustMsortOut, MustMul, MustMul_, MustMulOut, MustMulScalar, MustMulScalar_, MustMultiMarginLossBackward, MustMultiMarginLossBackwardGradInput, MustMultilabelMarginLoss, MustMultilabelMarginLossBackward, MustMultilabelMarginLossBackwardGradInput, MustMultilabelMarginLossOut, MustMultinomial, MustMultinomialOut, MustMultiply, MustMultiply_, MustMultiplyOut, MustMultiplyScalar, MustMultiplyScalar_, MustMv, MustMvOut, MustMvlgamma, MustMvlgamma_, MustMvlgammaOut, MustNanToNum, MustNanToNum_, MustNanToNumOut, MustNanmean, MustNanmeanOut, MustNanmedian, MustNanmedianDim, MustNanmedianDimValues, MustNanquantile, MustNanquantileNew, MustNanquantileNewOut, MustNanquantileNewScalar, MustNanquantileNewScalarOut, MustNanquantileOut, MustNanquantileScalar, MustNanquantileScalarOut, MustNansum, MustNansumDimIntlist, MustNansumIntlistOut, MustNarrow, MustNarrowCopy, MustNarrowCopyOut, MustNarrowTensor, MustNativeBatchNorm, MustNativeBatchNormOut, MustNativeGroupNorm, MustNativeLayerNorm, MustNativeNorm, MustNativeNormScalaroptDimDtype, MustNe, MustNe_, MustNeScalarOut, MustNeTensor, MustNeTensor_, MustNeTensorOut, MustNeg, MustNeg_, MustNegOut.]
*Tensor) MustNegative(del bool) (retVal *Tensor) { - - retVal, err := ts.Negative(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNegative_() { - - err := ts.Negative_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNegativeOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NegativeOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewEmptyStrided(size, stride, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewOnes(size, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNextafter(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Nextafter(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNextafter_(other *Tensor) { - - err := ts.Nextafter_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNextafterOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NextafterOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2dBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, 
totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossNd(target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNonzero(del bool) (retVal *Tensor) { - - retVal, err := ts.Nonzero(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNonzeroOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NonzeroOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNorm(del bool) (retVal *Tensor) { - - retVal, err := ts.Norm(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.NormDtypeOut(out, p, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormExceptDim(v *Tensor, pow int64, dim int64) (retVal *Tensor) { - - retVal, err := NormExceptDim(v, pow, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NormOut(out, p, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NormScalaroptDim(p, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.NormScalaroptDimDtype(p, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.NormScalaroptDtype(p, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} 
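Every Must* wrapper in this hunk is stamped out from one template: call the error-returning method of the same name, log.Fatal on failure, and hand back the bare result, with the trailing del flag (gotch's convention for freeing the receiver tensor once the underlying C call returns). A minimal sketch of the two calling styles, using only signatures that appear in this hunk (MustOnes, MustNorm, Norm); the import paths, gotch.Float, gotch.CPU, and Float64Values are assumptions based on the package layout in this patch, not guaranteed by the hunk itself:

package main

import (
	"fmt"
	"log"

	"github.com/sugarme/gotch"    // assumed module root
	"github.com/sugarme/gotch/ts" // the package this generated file belongs to
)

func main() {
	// Must* style: any libtorch error is fatal, so the happy path stays linear.
	x := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	n := x.MustNorm(true) // del=true drops x once the norm has been computed
	fmt.Println(n.Float64Values())

	// Equivalent explicit style via the error-returning API the wrapper delegates to.
	y := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	norm, err := y.Norm(true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(norm.Float64Values())
}

Both forms are emitted by the same generator run, which is why this file changes in lockstep with the error-returning tensor-generated.go whenever the bindings are regenerated.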
- -func MustNormal(out *Tensor, mean *Tensor, std float64) (retVal *Tensor) { - - retVal, err := Normal(out, mean, std) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormal_(mean float64, std float64) { - - err := ts.Normal_(mean, std) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustNormalFloatFloatOut(out *Tensor, mean float64, std float64, size []int64) (retVal *Tensor) { - - retVal, err := NormalFloatFloatOut(out, mean, std, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormalFloatTensorOut(out *Tensor, mean float64, std *Tensor) (retVal *Tensor) { - - retVal, err := NormalFloatTensorOut(out, mean, std) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormalTensorTensorOut(out *Tensor, mean *Tensor, std *Tensor) (retVal *Tensor) { - - retVal, err := NormalTensorTensorOut(out, mean, std) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNotEqual(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.NotEqual(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNotEqual_(other *Scalar) { - - err := ts.NotEqual_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNotEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.NotEqualScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNotEqualTensor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NotEqualTensor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNotEqualTensor_(other *Tensor) { - - err := ts.NotEqualTensor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNotEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NotEqualTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNorm(keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNorm(keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNormDim(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNormDim(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNormDimOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNormOut(out *Tensor, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNormOut(out, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNumpyT(del bool) (retVal *Tensor) { - - retVal, err := ts.NumpyT(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOneHot(numClasses int64, del bool) (retVal *Tensor) { - - retVal, err := ts.OneHot(numClasses, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Ones(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOnesLike(del bool) (retVal *Tensor) { - - retVal, err := 
ts.OnesLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustOnesOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := OnesOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrgqr(input2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Orgqr(input2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.OrgqrOut(out, input2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Ormqr(input2, input3, left, transpose, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor) { - - retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOuter(vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Outer(vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOuterOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.OuterOut(out, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOutputNr(del bool) (retVal int64) { - - retVal, err := ts.OutputNr(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPadSequence(sequences []Tensor, batchFirst bool, paddingValue float64) (retVal *Tensor) { - - retVal, err := PadSequence(sequences, batchFirst, paddingValue) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor) { - - retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPdist(p float64, del bool) (retVal *Tensor) { - - retVal, err := ts.Pdist(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPermute(dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Permute(dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPinMemory(device gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.PinMemory(device, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPinverse(rcond float64, del bool) (retVal *Tensor) { - - retVal, err := ts.Pinverse(rcond, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool) (retVal *Tensor) { - - retVal, err := ts.PixelShuffle(upscaleFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPixelUnshuffle(downscaleFactor int64, del bool) (retVal *Tensor) { - - retVal, err := ts.PixelUnshuffle(downscaleFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPoisson(del bool) (retVal *Tensor) { - - retVal, err := ts.Poisson(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64) (retVal *Tensor) { - - retVal, 
err := PoissonNllLoss(input, target, logInput, full, eps, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPolar(abs *Tensor, angle *Tensor) (retVal *Tensor) { - - retVal, err := Polar(abs, angle) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPolarOut(out *Tensor, abs *Tensor, angle *Tensor) (retVal *Tensor) { - - retVal, err := PolarOut(out, abs, angle) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPolygamma(n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Polygamma(n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPolygamma_(n int64) { - - err := ts.Polygamma_(n) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.PolygammaOut(out, n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPositive(del bool) (retVal *Tensor) { - - retVal, err := ts.Positive(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPow(exponent *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Pow(exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPow_(exponent *Scalar) { - - err := ts.Pow_(exponent) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustPowScalar(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { - - retVal, err := PowScalar(selfScalar, exponent) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { - - retVal, err := PowScalarOut(out, selfScalar, exponent) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPowTensor_(exponent *Tensor) { - - err := ts.PowTensor_(exponent) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustPowTensorScalar(exponent *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.PowTensorScalar(exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPowTensorScalarOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.PowTensorScalarOut(out, exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPowTensorTensorOut(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.PowTensorTensorOut(out, exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPrelu(weight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Prelu(weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPreluBackward(gradOutput *Tensor, weight *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.PreluBackward(gradOutput, weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustProd(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Prod(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.ProdDimInt(dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustProdIntOut(out *Tensor, dim int64, keepdim 
bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.ProdIntOut(out, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPut(index *Tensor, source *Tensor, accumulate bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Put(index, source, accumulate, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool) { - - err := ts.Put_(index, source, accumulate) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustQPerChannelAxis(del bool) (retVal int64) { - - retVal, err := ts.QPerChannelAxis(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQPerChannelScales(del bool) (retVal *Tensor) { - - retVal, err := ts.QPerChannelScales(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQPerChannelZeroPoints(del bool) (retVal *Tensor) { - - retVal, err := ts.QPerChannelZeroPoints(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQScale(del bool) (retVal float64) { - - retVal, err := ts.QScale(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQZeroPoint(del bool) (retVal int64) { - - retVal, err := ts.QZeroPoint(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQr(some bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Qr(some, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustQrQ(q *Tensor, r *Tensor, some bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.QrQ(q, r, some, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustQuantile(q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Quantile(q, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileNew(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantileNew(q, dim, keepdim, interpolation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileNewOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantileNewOut(out, q, dim, keepdim, interpolation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileNewScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantileNewScalar(q, dim, keepdim, interpolation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileNewScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantileNewScalarOut(out, q, dim, keepdim, interpolation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantileOut(out, q, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileScalar(q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := 
ts.QuantileScalar(q, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantileScalarOut(out, q, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizePerTensorTensorQparams(scale, zeroPoint, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64) (retVal *Tensor) { - - retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { - - retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := QuantizedLstmCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustQuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizedMaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { - - retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, 
packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { - - retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRad2deg(del bool) (retVal *Tensor) { - - retVal, err := ts.Rad2deg(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRad2deg_() { - - err := ts.Rad2deg_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRad2degOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Rad2degOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Rand(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandLike(del bool) (retVal *Tensor) { - - retVal, err := ts.RandLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := RandOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randint(high, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandintLike(high int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RandintLike(high, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandintLikeLowDtype(low int64, high int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RandintLikeLowDtype(low, high, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := RandintLow(low, high, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandintLowOut(out *Tensor, low int64, high int64, size []int64) (retVal *Tensor) { - - retVal, err := RandintLowOut(out, low, high, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandintOut(out *Tensor, high int64, size []int64) (retVal *Tensor) { - - retVal, err := RandintOut(out, high, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randn(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandnLike(del bool) (retVal *Tensor) { - - retVal, err := ts.RandnLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandnOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := RandnOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandom_() { - - err 
:= ts.Random_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRandomFrom_(from int64, to []int64) { - - err := ts.RandomFrom_(from, to) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRandomTo_(to int64) { - - err := ts.RandomTo_(to) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randperm(n, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandpermOut(out *Tensor, n int64) (retVal *Tensor) { - - retVal, err := RandpermOut(out, n) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Range(start, end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRangeOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor) { - - retVal, err := RangeOut(out, start, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := RangeStep(start, end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRavel(del bool) (retVal *Tensor) { - - retVal, err := ts.Ravel(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReal(del bool) (retVal *Tensor) { - - retVal, err := ts.Real(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReciprocal(del bool) (retVal *Tensor) { - - retVal, err := ts.Reciprocal(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReciprocal_() { - - err := ts.Reciprocal_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustReciprocalOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ReciprocalOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1dBackwardGradInput(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del) - if err != 
nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2dBackwardGradInput(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad3d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad3d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad3dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad3dBackwardGradInput(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad3dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Relu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRelu6(del bool) (retVal *Tensor) { - - retVal, err := ts.Relu6(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRelu6_() { - - err := ts.Relu6_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRelu_() { - - err := ts.Relu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRemainder(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Remainder(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRemainder_(other *Scalar) { - - err := ts.Remainder_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRemainderScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.RemainderScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRemainderScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := RemainderScalarTensor(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRemainderTensor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RemainderTensor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRemainderTensor_(other *Tensor) { - - err := ts.RemainderTensor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRemainderTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RemainderTensorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Renorm(p, dim, maxnorm, del) - if err != nil { - log.Fatal(err) 
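// NOTE: wrappers whose name ends in an underscore (e.g. MustRenorm_ just below)
// bind the in-place variant of the op: they mutate the receiver and return
// nothing, so there is no retVal and no del flag in their signatures.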
- } - - return retVal -} - -func (ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar) { - - err := ts.Renorm_(p, dim, maxnorm) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.RenormOut(out, p, dim, maxnorm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRepeat(repeats []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Repeat(repeats, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRepeatInterleave(repeats *Tensor, outputSize []int64) (retVal *Tensor) { - - retVal, err := RepeatInterleave(repeats, outputSize) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RepeatInterleaveSelfInt(repeats, dim, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RepeatInterleaveSelfTensor(repeats, dim, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1dBackwardGradInput(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2dBackwardGradInput(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3dBackward(gradOutput 
*Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3dBackwardGradInput(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRequiresGrad_(requiresGrad bool) { - - err := ts.RequiresGrad_(requiresGrad) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustReshape(shape []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Reshape(shape, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReshapeAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ReshapeAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustResize_(size []int64) { - - err := ts.Resize_(size) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustResizeAs_(theTemplate *Tensor) { - - err := ts.ResizeAs_(theTemplate) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustResizeAsSparse_(theTemplate *Tensor) { - - err := ts.ResizeAsSparse_(theTemplate) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustResolveConj(del bool) (retVal *Tensor) { - - retVal, err := ts.ResolveConj(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustResolveNeg(del bool) (retVal *Tensor) { - - retVal, err := ts.ResolveNeg(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRetainsGrad(del bool) (retVal bool) { - - retVal, err := ts.RetainsGrad(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRnnRelu(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := RnnRelu(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { - - retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := RnnReluData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustRnnTanh(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := RnnTanh(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) - if err != nil { - log.Fatal(err) - } - - 
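// NOTE: ops with multiple results (the RNN family here, and Qr/PreluBackward
// earlier in this hunk) are unpacked into separate retVal0/retVal1 returns
// rather than a tuple, mirroring the multiple outputs of the C++ API.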
return retVal0, retVal1 -} - -func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { - - retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := RnnTanhData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Roll(shifts, dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRot90(k int64, dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Rot90(k, dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRound(del bool) (retVal *Tensor) { - - retVal, err := ts.Round(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRound_() { - - err := ts.Round_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRoundOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RoundOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRowStack(tensors []Tensor) (retVal *Tensor) { - - retVal, err := RowStack(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRowStackOut(out *Tensor, tensors []Tensor) (retVal *Tensor) { - - retVal, err := RowStackOut(out, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRrelu(training bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Rrelu(training, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRrelu_(training bool) { - - err := ts.Rrelu_(training) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool) (retVal *Tensor) { - - retVal, err := ts.RreluWithNoise(noise, training, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool) { - - err := ts.RreluWithNoise_(noise, training) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool) (retVal *Tensor) { - - retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool) (retVal *Tensor) { - - retVal, err := ts.RreluWithNoiseOut(out, noise, training, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsqrt(del bool) (retVal *Tensor) { - - retVal, err := ts.Rsqrt(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsqrt_() { - - err := ts.Rsqrt_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRsqrtOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RsqrtOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal 
-} - -func (ts *Tensor) MustRsub(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Rsub(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsubScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.RsubScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := ScalarTensor(s, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Scatter(dim, index, src, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor) { - - err := ts.Scatter_(dim, index, src) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterAdd(dim, index, src, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor) { - - err := ts.ScatterAdd_(dim, index, src) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterAddOut(out, dim, index, src, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterReduce(dim, index, src, reduce, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string) { - - err := ts.ScatterReduce_(dim, index, src, reduce) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterReduceOut(out, dim, index, src, reduce, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterSrcOut(out, dim, index, src, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterValue(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterValue(dim, index, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterValue_(dim int64, index *Tensor, value *Scalar) { - - err := ts.ScatterValue_(dim, index, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterValueOut(out, dim, index, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterValueReduce(dim, index, value, reduce, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterValueReduce_(dim int64, 
index *Tensor, value *Scalar, reduce string) { - - err := ts.ScatterValueReduce_(dim, index, value, reduce) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterValueReduceOut(out, dim, index, value, reduce, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSearchsorted(sortedSequence *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Searchsorted(sortedSequence, outInt32, right, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool) (retVal *Tensor) { - - retVal, err := SearchsortedScalar(sortedSequence, selfScalar, outInt32, right) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor) { - - retVal, err := ts.SearchsortedTensorOut(out, sortedSequence, outInt32, right, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, axis int64, unsafety bool, initial *Scalar) (retVal *Tensor) { - - retVal, err := SegmentReduce(data, reduce, lengths, indices, axis, unsafety, initial) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSelect(dim int64, index int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Select(dim, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64) (retVal *Tensor) { - - retVal, err := SelectBackward(gradOutput, inputSizes, dim, index) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Selu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSelu_() { - - err := ts.Selu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSet_() { - - err := ts.Set_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSetRequiresGrad(r bool, del bool) (retVal *Tensor) { - - retVal, err := ts.SetRequiresGrad(r, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSetSourceTensor_(source *Tensor) { - - err := ts.SetSourceTensor_(source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSgn(del bool) (retVal *Tensor) { - - retVal, err := ts.Sgn(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSgn_() { - - err := ts.Sgn_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSgnOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SgnOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSigmoid(del bool) (retVal *Tensor) { - - retVal, err := ts.Sigmoid(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSigmoid_() { - - err := ts.Sigmoid_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustSigmoidBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := SigmoidBackward(gradOutput, output) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func MustSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := SigmoidBackwardGradInput(gradInput, gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSigmoidOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SigmoidOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSign(del bool) (retVal *Tensor) { - - retVal, err := ts.Sign(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSign_() { - - err := ts.Sign_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSignOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SignOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSignbit(del bool) (retVal *Tensor) { - - retVal, err := ts.Signbit(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSignbitOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SignbitOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSilu(del bool) (retVal *Tensor) { - - retVal, err := ts.Silu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSilu_() { - - err := ts.Silu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSiluBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SiluBackward(gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SiluBackwardGradInput(gradInput, gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSiluOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SiluOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSin(del bool) (retVal *Tensor) { - - retVal, err := ts.Sin(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSin_() { - - err := ts.Sin_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSinOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SinOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSinc(del bool) (retVal *Tensor) { - - retVal, err := ts.Sinc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSinc_() { - - err := ts.Sinc_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSincOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SincOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSinh(del bool) (retVal *Tensor) { - - retVal, err := ts.Sinh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSinh_() { - - err := ts.Sinh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSinhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SinhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlice(dim int64, start []int64, end []int64, step int64, del bool) (retVal *Tensor) { - - retVal, err := 
ts.Slice(dim, start, end, step, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64) (retVal *Tensor) { - - retVal, err := SliceBackward(gradOutput, inputSizes, dim, start, end, step) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlogdet(del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Slogdet(del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmm(mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Smm(mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts 
*Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1Loss(target, reduction, beta, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, beta, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1LossBackwardGradInput(gradInput, gradOutput, target, reduction, beta, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1LossOut(out, target, reduction, beta, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLoss(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLossOut(out, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Softmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplus(del bool) (retVal *Tensor) { - - retVal, err := ts.Softplus(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftplusBackwardGradInput(gradInput, gradOutput, beta, threshold, output, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplusOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftplusOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrink(del bool) (retVal *Tensor) { - - retVal, err := ts.Softshrink(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, 
lambd *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftshrinkBackwardGradInput(gradInput, gradOutput, lambd, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftshrinkOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSolve(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Solve(a, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSolveSolution(solution *Tensor, lu *Tensor, a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.SolveSolution(solution, lu, a, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSort(dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Sort(dim, descending, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSortStable(stable bool, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.SortStable(stable, dim, descending, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.SortValues(values, indices, dim, descending, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.SortValuesStable(values, indices, stable, dim, descending, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCooTensor(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCooTensorIndices(indices, values, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCooTensorIndicesSize(indices, values, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCsrTensor(crowIndices, colIndices, values, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice 
gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCsrTensorCrowColValueSize(crowIndices, colIndices, values, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSparseDim(del bool) (retVal int64) { - - retVal, err := ts.SparseDim(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSparseMask(mask *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SparseMask(mask, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64) { - - err := ts.SparseResize_(size, sparseDim, denseDim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64) { - - err := ts.SparseResizeAndClear_(size, sparseDim, denseDim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSpecialDigamma(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialDigamma(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialDigammaOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialDigammaOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialEntr(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialEntr(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialEntrOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialEntrOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErf(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErf(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfc(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfcOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfcOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfcx(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfcx(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfcxOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfcxOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfinv(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfinv(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialErfinvOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialErfinvOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialExp2(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialExp2(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialExp2Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialExp2Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialExpit(del bool) (retVal *Tensor) { - - retVal, err := 
ts.SpecialExpit(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialExpitOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialExpitOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialExpm1(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialExpm1(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialExpm1Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialExpm1Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialGammainc(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialGammainc(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialGammaincOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialGammaincOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialGammaincc(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialGammaincc(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialGammainccOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialGammainccOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialGammaln(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialGammaln(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialGammalnOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialGammalnOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI0(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI0(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI0Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI0Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI0e(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI0e(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI0eOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI0eOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI1(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI1(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI1Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI1Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI1e(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI1e(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialI1eOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialI1eOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialLog1p(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLog1p(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialLog1pOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLog1pOut(out, del) - if err != nil { - log.Fatal(err) - 
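A note on the pattern repeated throughout this hunk: every generated `Must*` wrapper simply calls its error-returning twin and aborts via `log.Fatal` on failure, and the trailing `del` flag controls whether the receiver tensor is freed once the op has consumed it. A minimal usage sketch of this removed 1.10-era API, assuming gotch's usual `ts.MustOnes` factory and `MustDrop` free helper (neither appears in this hunk):

```go
package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// ts.MustOnes / MustDrop are assumed from gotch's hand-written API;
	// MustSign and MustSqrt are generated wrappers shown in this hunk.
	x := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)

	// del = false: keep x alive for reuse after the call.
	signed := x.MustSign(false)

	// del = true: drop `signed` once MustSqrt has consumed it, so the
	// chain does not leak the intermediate C-side tensor.
	root := signed.MustSqrt(true)

	root.MustDrop()
	x.MustDrop()
}
```

As I read the convention, the `del=true` form is what lets long method chains run without leaking libtorch-side memory between Go GC cycles.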
} - - return retVal -} - -func (ts *Tensor) MustSpecialLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLogSoftmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialLogit(eps []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLogit(eps, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialLogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLogitOut(out, eps, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialLogsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLogsumexp(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialLogsumexpOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialMultigammaln(p int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialMultigammaln(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialMultigammalnOut(out *Tensor, p int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialMultigammalnOut(out, p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialNdtr(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialNdtr(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialNdtrOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialNdtrOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialNdtri(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialNdtri(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialNdtriOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialNdtriOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialPolygamma(n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialPolygamma(n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialPolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialPolygammaOut(out, n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialPsi(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialPsi(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialPsiOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialPsiOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialRound(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialRound(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialRoundOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialRoundOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialSinc(del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialSinc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialSincOut(out *Tensor, del bool) 
(retVal *Tensor) { - - retVal, err := ts.SpecialSincOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlog1py(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlog1py(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlog1pyOtherScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlog1pyOtherScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlog1pyOtherScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlog1pyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlog1pyOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := SpecialXlog1pySelfScalar(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := SpecialXlog1pySelfScalarOut(out, selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlogy(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlogy(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlogyOtherScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlogyOtherScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlogyOtherScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialXlogyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialXlogyOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := SpecialXlogySelfScalar(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := SpecialXlogySelfScalarOut(out, selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialZeta(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialZeta(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialZetaOtherScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialZetaOtherScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialZetaOtherScalarOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSpecialZetaOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SpecialZetaOut(out, other, del) - if err != nil { - log.Fatal(err) - } - 
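The `SpecialZeta*` family above shows how the generator flattens libtorch overloads into distinct Go names: `...OtherScalar` when `other` is a `*Scalar`, `...Out` when writing into a preallocated tensor, and a package-level `...SelfScalar` when the receiver position itself is a scalar (so no method receiver is possible). A sketch calling two of those variants, assuming a `ts.FloatScalar` constructor that is not part of this hunk:

```go
package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustOnes([]int64{4}, gotch.Double, gotch.CPU) // assumed factory

	// zeta(x, q) with a scalar q: the "OtherScalar" overload.
	q := ts.FloatScalar(2.0) // assumed Scalar constructor
	a := x.MustSpecialZetaOtherScalar(q, false)

	// zeta(s, x) with a scalar first argument: the generator emits a
	// package-level "SelfScalar" function instead of a method.
	b := ts.MustSpecialZetaSelfScalar(ts.FloatScalar(1.5), x)

	a.MustDrop()
	b.MustDrop()
	x.MustDrop()
}
```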
- return retVal -} - -func MustSpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := SpecialZetaSelfScalar(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := SpecialZetaSelfScalarOut(out, selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqrt(del bool) (retVal *Tensor) { - - retVal, err := ts.Sqrt(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqrt_() { - - err := ts.Sqrt_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSqrtOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SqrtOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSquare(del bool) (retVal *Tensor) { - - retVal, err := ts.Square(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSquare_() { - - err := ts.Square_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSquareOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SquareOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqueeze(del bool) (retVal *Tensor) { - - retVal, err := ts.Squeeze(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqueeze_() { - - err := ts.Squeeze_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSqueezeDim(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SqueezeDim(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqueezeDim_(dim int64) { - - err := ts.SqueezeDim_(dim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Sspaddmm(mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SspaddmmOut(out, mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustStack(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Stack(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustStackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := StackOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStd(unbiased bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Std(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStdCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.StdCorrection(dim, correction, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.StdCorrectionOut(out, dim, correction, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStdDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.StdDim(dim, unbiased, keepdim, del) - if 
err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStdMean(unbiased bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.StdMean(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustStdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.StdMeanCorrection(dim, correction, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustStdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.StdMeanDim(dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, returnComplex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSub(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Sub(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSub_(other *Tensor) { - - err := ts.Sub_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SubOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSubScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SubScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSubScalar_(other *Scalar) { - - err := ts.SubScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSubtract(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Subtract(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSubtract_(other *Tensor) { - - err := ts.Subtract_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSubtractOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SubtractOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSubtractScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SubtractScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSubtractScalar_(other *Scalar) { - - err := ts.SubtractScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSum(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Sum(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.SumDimIntlist(dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSumIntlistOut(out *Tensor, dim 
[]int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.SumIntlistOut(out, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSumToSize(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SumToSize(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSvd(some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.Svd(some, computeUv, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustSvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.SvdU(u, s, v, some, computeUv, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustSwapaxes(axis0 int64, axis1 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Swapaxes(axis0, axis1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSwapaxes_(axis0 int64, axis1 int64) { - - err := ts.Swapaxes_(axis0, axis1) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSwapdims(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Swapdims(dim0, dim1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSwapdims_(dim0 int64, dim1 int64) { - - err := ts.Swapdims_(dim0, dim1) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSymeig(eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Symeig(eigenvectors, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustSymeigE(e *Tensor, v *Tensor, eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.SymeigE(e, v, eigenvectors, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustT(del bool) (retVal *Tensor) { - - retVal, err := ts.T(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustT_() { - - err := ts.T_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTake(index *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Take(index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTakeAlongDim(indices *Tensor, dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TakeAlongDim(indices, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TakeAlongDimOut(out, indices, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TakeOut(out, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTan(del bool) (retVal *Tensor) { - - retVal, err := ts.Tan(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTan_() { - - err := ts.Tan_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTanOut(out *Tensor, del bool) 
(retVal *Tensor) { - - retVal, err := ts.TanOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTanh(del bool) (retVal *Tensor) { - - retVal, err := ts.Tanh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTanh_() { - - err := ts.Tanh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTanhBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := TanhBackward(gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := TanhBackwardGradInput(gradInput, gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTanhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TanhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TensordotOut(out, other, dimsSelf, dimsOther, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Threshold(threshold, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar) { - - err := ts.Threshold_(threshold, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ThresholdBackwardGradInput(gradInput, gradOutput, threshold, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ThresholdOut(out, threshold, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTile(dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Tile(dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTo(device gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.To(device, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToDense(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.ToDense(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustToDenseBackward(grad *Tensor, input *Tensor) (retVal *Tensor) { - - retVal, err := ToDenseBackward(grad, input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.ToDevice(device, dtype, nonBlocking, copy, del) - 
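`MustTo`, `MustTotype`, `MustToDtype`, and `MustToDevice` in this stretch form the device/dtype movement family: `MustTo` moves across devices only, `MustTotype` converts dtype only, and the wider variants add `nonBlocking`/`copy` control. A small sketch, again assuming the `ts.MustOnes` factory:

```go
package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustOnes([]int64{2, 2}, gotch.Float, gotch.CPU) // assumed factory

	// Dtype-only conversion (float32 -> float64).
	d := x.MustTotype(gotch.Double, false)

	// Device-only move; substitute a CUDA device value on a GPU build.
	// del = true frees the float64 intermediate after the move.
	y := d.MustTo(gotch.CPU, true)

	y.MustDrop()
	x.MustDrop()
}
```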
if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.ToDtype(dtype, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.ToDtypeLayout(optionsKind, optionsDevice, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToMkldnn(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.ToMkldnn(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustToMkldnnBackward(grad *Tensor, input *Tensor) (retVal *Tensor) { - - retVal, err := ToMkldnnBackward(grad, input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToOther(other *Tensor, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.ToOther(other, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToSparse(del bool) (retVal *Tensor) { - - retVal, err := ts.ToSparse(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToSparseSparseDim(sparseDim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ToSparseSparseDim(sparseDim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTopk(k int64, dim int64, largest bool, sorted bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.Topk(k, dim, largest, sorted, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustTopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.TopkValues(values, indices, k, dim, largest, sorted, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustTotype(scalarType gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Totype(scalarType, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrace(del bool) (retVal *Tensor) { - - retVal, err := ts.Trace(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTraceBackward(grad *Tensor, sizes []int64) (retVal *Tensor) { - - retVal, err := TraceBackward(grad, sizes) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Transpose(dim0, dim1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTranspose_(dim0 int64, dim1 int64) { - - err := ts.Transpose_(dim0, dim1) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTrapezoid(y *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Trapezoid(y, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTrapezoidX(y *Tensor, x *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := TrapezoidX(y, x, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTrapz(y *Tensor, x *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Trapz(y, x, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTrapzDx(y *Tensor, dx float64, dim 
int64) (retVal *Tensor) { - - retVal, err := TrapzDx(y, dx, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.TriangularSolve(a, upper, transpose, unitriangular, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustTriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.TriangularSolveX(x, m, a, upper, transpose, unitriangular, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustTril(diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Tril(diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTril_(diagonal int64) { - - err := ts.Tril_(diagonal) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TrilOut(out, diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64) (retVal *Tensor) { - - retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriu(diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Triu(diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriu_(diagonal int64) { - - err := ts.Triu_(diagonal) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TriuOut(out, diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivide(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TrueDivide(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivide_(other *Tensor) { - - err := ts.TrueDivide_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TrueDivideOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivideScalar(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.TrueDivideScalar(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivideScalar_(other *Scalar) { - - err := ts.TrueDivideScalar_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts 
*Tensor) MustTrunc(del bool) (retVal *Tensor) { - - retVal, err := ts.Trunc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrunc_() { - - err := ts.Trunc_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTruncOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TruncOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTypeAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TypeAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUnflatten(dim int64, sizes []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Unflatten(dim, sizes, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Unfold(dimension, size, step, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64) (retVal *Tensor) { - - retVal, err := UnfoldBackward(gradIn, inputSizes, dim, size, step) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUniform_(from float64, to float64) { - - err := ts.Uniform_(from, to) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustUniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.UniqueConsecutive(returnInverse, returnCounts, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustUniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.UniqueDim(dim, sorted, returnInverse, returnCounts, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustUniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { - - retVal0, retVal1, retVal2, err := ts.UniqueDimConsecutive(dim, returnInverse, returnCounts, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1, retVal2 -} - -func (ts *Tensor) MustUnsqueeze(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Unsqueeze(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUnsqueeze_(dim int64) { - - err := ts.Unsqueeze_(dim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, 
scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleBicubic2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleBilinear2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor) { - - retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor) { - - retVal, err := UpsampleLinear1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest1d(outputSize, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor) { - - retVal, err := 
UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del) - 
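The upsample wrappers here also illustrate how optional numeric arguments surface in Go: libtorch's optional `scales_h`/`scales_w` become `[]float64` parameters, where a nil or empty slice appears to mean "unset, derive from outputSize" in the generated marshalling. A hedged sketch under that reading:

```go
package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// A 1x3x8x8 NCHW input; factory assumed as before.
	img := ts.MustOnes([]int64{1, 3, 8, 8}, gotch.Float, gotch.CPU)

	// nil for scalesH/scalesW leaves the optional doubles unset, so
	// libtorch derives the scale factors from outputSize.
	up := img.MustUpsampleNearest2d([]int64{16, 16}, nil, nil, false)

	up.MustDrop()
	img.MustDrop()
}
```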
if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor) { - - retVal, err := UpsampleTrilinear3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool) (retVal *Tensor) { - - retVal, err := ValueSelectingReductionBackward(grad, dim, indices, sizes, keepdim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustValues(del bool) (retVal *Tensor) { - - retVal, err := ts.Values(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustVander(x *Tensor, n []int64, increasing bool) (retVal *Tensor) { - - retVal, err := Vander(x, n, increasing) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVar(unbiased bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Var(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVarCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.VarCorrection(dim, correction, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.VarCorrectionOut(out, dim, correction, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVarDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.VarDim(dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVarMean(unbiased bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.VarMean(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustVarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.VarMeanCorrection(dim, correction, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustVarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor) { - - retVal0, retVal1, err := ts.VarMeanDim(dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal0, retVal1 -} - -func (ts *Tensor) MustVarOut(out *Tensor, 
dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVdot(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Vdot(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVdotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.VdotOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustView(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.View(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustViewAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ViewAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustViewAsComplex(del bool) (retVal *Tensor) { - - retVal, err := ts.ViewAsComplex(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustViewAsReal(del bool) (retVal *Tensor) { - - retVal, err := ts.ViewAsReal(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustViewDtype(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.ViewDtype(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustVstack(tensors []Tensor) (retVal *Tensor) { - - retVal, err := Vstack(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustVstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor) { - - retVal, err := VstackOut(out, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustWhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar) (retVal *Tensor) { - - retVal, err := WhereScalar(condition, selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustWhereScalarother(condition *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.WhereScalarother(condition, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustWhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := WhereScalarself(condition, selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustWhereSelf(condition *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.WhereSelf(condition, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustXlogy(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Xlogy(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustXlogy_(other *Tensor) { - - err := ts.Xlogy_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustXlogyOutscalarOther(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.XlogyOutscalarOther(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustXlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := XlogyOutscalarSelf(out, selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustXlogyOuttensor(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.XlogyOuttensor(out, other, del) - if err != nil 
{ - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustXlogyScalarOther(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.XlogyScalarOther(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustXlogyScalarOther_(other *Scalar) { - - err := ts.XlogyScalarOther_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustXlogyScalarSelf(selfScalar *Scalar, other *Tensor) (retVal *Tensor) { - - retVal, err := XlogyScalarSelf(selfScalar, other) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustZero_() { - - err := ts.Zero_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Zeros(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustZerosLike(del bool) (retVal *Tensor) { - - retVal, err := ts.ZerosLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustZerosOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := ZerosOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -// End of implementing Tensor ================================= +func(ts *Tensor) Must__And_(other *Scalar)() { + + err := ts.__And_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__AndTensor_(other *Tensor)() { + + err := ts.__AndTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Iand_(other *Scalar)() { + + err := ts.__Iand_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__IandTensor_(other *Tensor)() { + + err := ts.__IandTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ilshift_(other *Scalar)() { + + err := ts.__Ilshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__IlshiftTensor_(other *Tensor)() { + + err := ts.__IlshiftTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ior_(other *Scalar)() { + + err := ts.__Ior_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__IorTensor_(other *Tensor)() { + + err := ts.__IorTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Irshift_(other *Scalar)() { + + err := ts.__Irshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__IrshiftTensor_(other *Tensor)() { + + err := ts.__IrshiftTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ixor_(other *Scalar)() { + + err := ts.__Ixor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__IxorTensor_(other *Tensor)() { + + err := ts.__IxorTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Lshift_(other *Scalar)() { + + err := ts.__Lshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__LshiftTensor_(other *Tensor)() { + + err := ts.__LshiftTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Or_(other *Scalar)() { + + err := ts.__Or_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__OrTensor_(other *Tensor)() { + + err := ts.__OrTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Rshift_(other *Scalar)() { + + err := 
ts.__Rshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__RshiftTensor_(other *Tensor)() { + + err := ts.__RshiftTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Xor_(other *Scalar)() { + + err := ts.__Xor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__XorTensor_(other *Tensor)() { + + err := ts.__XorTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._AdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._AdaptiveAvgPool3d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AdaptiveAvgPool3dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddBatchDim(batchDim int64, level int64, del bool)(retVal *Tensor) { + + retVal, err := ts._AddBatchDim(batchDim, level, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddRelu(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AddRelu(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddRelu_(other *Tensor)() { + + err := ts._AddRelu_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AddReluOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddReluScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts._AddReluScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddReluScalar_(other *Scalar)() { + + err := ts._AddReluScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_Aminmax(del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._Aminmax(del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_AminmaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._AminmaxDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)() { + + err := ts._AmpUpdateScale_(growthTracker, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_AutocastToFullPrecision(cudaEnabled bool, cpuEnabled bool, del bool)(retVal *Tensor) { + + retVal, err := ts._AutocastToFullPrecision(cudaEnabled, cpuEnabled, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AutocastToReducedPrecision(cudaEnabled bool, cpuEnabled bool, cudaDtype gotch.DType, cpuDtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := 
ts._AutocastToReducedPrecision(cudaEnabled, cpuEnabled, cudaDtype, cpuDtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastByte(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastByte(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastChar(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastChar(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastDouble(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastDouble(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastFloat(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastFloat(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastHalf(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastHalf(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastInt(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastInt(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastLong(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastLong(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastShort(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastShort(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Cat(tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _Cat(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _CatOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor) { + + retVal, err := _CdistBackward(grad, x1, x2, p, cdist) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CholeskySolveHelper(a, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Coalesce(del bool)(retVal *Tensor) { + + retVal, err := ts._Coalesce(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Coalesced_(coalesced bool)() { + + err := ts._Coalesced_(coalesced) + if err != nil { log.Fatal(err) } + + return +} + +func Must_ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tensor) { + + retVal, err := _ComputeLinearCombination(input, coefficients) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor)(retVal *Tensor) { + + retVal, err := _ComputeLinearCombinationOut(out, input, coefficients) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Conj(del bool)(retVal *Tensor) { + + retVal, err := ts._Conj(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ConjPhysical(del bool)(retVal *Tensor) { + + retVal, err := ts._ConjPhysical(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias 
*Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._ConvDepthwise2d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._ConvDepthwise2dOut(out, weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool)(retVal *Tensor) { + + retVal, err := ts._ConvertIndicesFromCooToCsr(size, outInt32, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool)(retVal *Tensor) { + + retVal, err := ts._ConvertIndicesFromCooToCsrOut(out, size, outInt32, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ConvertIndicesFromCsrToCoo(crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool)(retVal *Tensor) { + + retVal, err := _ConvertIndicesFromCsrToCoo(crowIndices, colIndices, outInt32, transpose) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ConvertIndicesFromCsrToCooOut(out *Tensor, crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool)(retVal *Tensor) { + + retVal, err := _ConvertIndicesFromCsrToCooOut(out, crowIndices, colIndices, outInt32, transpose) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool)(retVal *Tensor) { + + retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled, allowTf32) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := _ConvolutionDeprecated(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) { + + retVal, err := _ConvolutionMode(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CopyFrom(dst, nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CopyFromAndResize(dst *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._CopyFromAndResize(dst, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _CtcLoss(logProbs, targets, 
inputLengths, targetLengths, blank, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor) { + + retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _CudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank, deterministic, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CudnnRnn(input *Tensor, weight []Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, retVal4, err := _CudnnRnn(input, weight, weightStride0, weightBuf, hx, cx, mode, hiddenSize, projSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3, retVal4 +} + +func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal *Tensor) { + + retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, batchFirst, bidirectional) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CufftGetPlanCacheMaxSize(deviceIndex int64)(retVal int64) { + + retVal, err := _CufftGetPlanCacheMaxSize(deviceIndex) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CufftGetPlanCacheSize(deviceIndex int64)(retVal int64) { + + retVal, err := _CufftGetPlanCacheSize(deviceIndex) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_DebugHasInternalOverlap(del bool)(retVal int64) { + + retVal, err := ts._DebugHasInternalOverlap(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_DetLuBasedHelper(del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts._DetLuBasedHelper(del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) Must_DetLuBasedHelperBackwardHelper(detGrad *Tensor, det *Tensor, lu *Tensor, pivs *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._DetLuBasedHelperBackwardHelper(detGrad, det, lu, pivs, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_DimArange(like *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _DimArange(like, dim) + if err != nil { log.Fatal(err) } + + return 
retVal +} + +func(ts *Tensor) Must_Dimi(del bool)(retVal int64) { + + retVal, err := ts._Dimi(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Dimv(del bool)(retVal int64) { + + retVal, err := ts._Dimv(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor) { + + retVal, err := _DirichletGrad(x, alpha, total) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Efficientzerotensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _Efficientzerotensor(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3 +} + +func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) { + + retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights, paddingIdx) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) { + + retVal, err := _EmbeddingBagDenseBackward(grad, indices, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBagForwardOnly(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3 +} + +func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor) { + + retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode, paddingIdx) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) { + + retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx) + if err != nil { log.Fatal(err) } + + return 
retVal +} + +func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal *Tensor) { + + retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor) { + + retVal, err := _EuclideanDist(x1, x2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) { + + retVal, err := ts._FakeQuantizeLearnablePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) { + + retVal, err := ts._FakeQuantizeLearnablePerTensorAffine(scale, zeroPoint, quantMin, quantMax, gradFactor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, gradFactor, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) Must_FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._FakeQuantizePerTensorAffineCachemaskTensorQparams(scale, zeroPoint, fakeQuantEnabled, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_FftC2c(dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor) { + + retVal, err := ts._FftC2c(dim, normalization, forward, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor) { + + retVal, err := ts._FftC2cOut(out, dim, normalization, forward, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FftC2r(dim, normalization, lastDimSize, del) + 
if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FftC2rOut(out, dim, normalization, lastDimSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftR2c(dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor) { + + retVal, err := ts._FftR2c(dim, normalization, onesided, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor) { + + retVal, err := ts._FftR2cOut(out, dim, normalization, onesided, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FusedDropout(p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._FusedDropout(p, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._FusedMovingAvgObsFqHelper(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_FwPrimal(level int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FwPrimal(level, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._GatherSparseBackward(dim, index, grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := _GridSampler2dCpuFallback(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _GridSampler2dCpuFallbackBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool) { + + retVal, err := ts._HasCompatibleShallowCopyType(from, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_HasSameStorageNumel(other *Tensor, del bool)(retVal bool) { + + retVal, err := ts._HasSameStorageNumel(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_HistogramddFromBinTensors(bins []Tensor, weight *Tensor, density bool, del bool)(retVal *Tensor) { + + retVal, err := ts._HistogramddFromBinTensors(bins, weight, density, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor)() { + + err := ts._IndexCopy_(dim, index, source) + if err != nil { log.Fatal(err) } 
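+	// NOTE: a minimal usage sketch (illustrative comment only, not emitted by
+	// the generator): every Must* wrapper in this file has the same shape --
+	// it calls the error-returning variant and exits via log.Fatal on failure.
+	// Assuming two valid tensors a and b, a hypothetical call site:
+	//
+	//	sum := a.MustAdd(b, false)   // halts the process if the op fails
+	//	sum2, err := a.Add(b, false) // the underlying error-returning API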
+ + return +} + +func(ts *Tensor) Must_Indices(del bool)(retVal *Tensor) { + + retVal, err := ts._Indices(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_IsZerotensor(del bool)(retVal bool) { + + retVal, err := ts._IsZerotensor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LinalgInvOutHelper_(infosLu *Tensor, infosGetri *Tensor)() { + + err := ts._LinalgInvOutHelper_(infosLu, infosGetri) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_LinalgQrHelper(mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._LinalgQrHelper(mode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func Must_LinalgSvd(a *Tensor, fullMatrices bool, computeUv bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := _LinalgSvd(a, fullMatrices, computeUv) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func Must_LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, computeUv bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := _LinalgSvdU(u, s, vh, a, fullMatrices, computeUv) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._LogSoftmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) { + + retVal, err := _LogSoftmaxBackwardData(gradOutput, output, dim, inputDtype) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) { + + retVal, err := _LogSoftmaxBackwardDataOut(out, gradOutput, output, dim, inputDtype) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._LogSoftmaxOut(out, dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Logcumsumexp(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._Logcumsumexp(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._LogcumsumexpOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LuWithInfo(pivot bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts._LuWithInfo(pivot, checkErrors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func Must_MakeDual(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor) { + + retVal, err := _MakeDual(primal, tangent, level) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal 
*Tensor) { + + retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Tensor) { + + retVal, err := ts._MaskedScale(mask, scale, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MaskedSoftmax(mask *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._MaskedSoftmax(mask, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MkldnnReshape(shape []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MkldnnReshape(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MkldnnTranspose(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64)() { + + err := ts._MkldnnTranspose_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func Must_NativeMultiHeadSelfAttention(query *Tensor, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor)(retVal *Tensor) { + + retVal, err := _NativeMultiHeadSelfAttention(query, qkvWeight, qkvBias, projWeight, projBias, mask) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_NegView(del bool)(retVal *Tensor) { + + retVal, err := ts._NegView(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_NewZerosWithSameFeatureMeta(other *Tensor, selfNumBatchDims int64, del bool)(retVal *Tensor) { + + retVal, err := ts._NewZerosWithSameFeatureMeta(other, selfNumBatchDims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackAvailable()(retVal bool) { + + retVal, err := _NnpackAvailable() + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Nnz(del bool)(retVal int64) { + + retVal, err := ts._Nnz(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _PackPaddedSequence(input, lengths, batchFirst) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool)(retVal *Tensor) { + + retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _PadPackedSequence(data, batchSizes, batchFirst, paddingValue, totalLength) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._PdistBackward(grad, p, pdist, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_PinMemory(device gotch.Device, del bool)(retVal *Tensor) { + + retVal, 
err := ts._PinMemory(device, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._RemoveBatchDim(level, batchSize, outDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ReshapeAlias(size []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._ReshapeAlias(size, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._ReshapeFromTensor(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _RowwisePrune(weight, mask, compressedIndicesDtype) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_SWhere(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._SWhere(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SampleDirichlet(del bool)(retVal *Tensor) { + + retVal, err := ts._SampleDirichlet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SaturateWeightToFp16(weight *Tensor)(retVal *Tensor) { + + retVal, err := _SaturateWeightToFp16(weight) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, axis int64)(retVal *Tensor) { + + retVal, err := _SegmentReduceBackward(grad, output, data, reduce, lengths, axis) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ShapeAsTensor(del bool)(retVal *Tensor) { + + retVal, err := ts._ShapeAsTensor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SlowConv2dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts._SlowConv2dBackward(gradInput, gradWeight, gradBias, gradOutput, weight, kernelSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func Must_SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _SobolEngineDraw(quasi, n, sobolstate, dimension, numGenerated, dtype) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64)() { + + err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_SobolEngineInitializeState_(dimension int64)() { + + err := ts._SobolEngineInitializeState_(dimension) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64)() { + + err := ts._SobolEngineScramble_(ltm, dimension) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._Softmax(dim, halfToFloat, del) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) { + + retVal, err := _SoftmaxBackwardData(gradOutput, output, dim, inputDtype) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) { + + retVal, err := _SoftmaxBackwardDataOut(gradInput, gradOutput, output, dim, inputDtype) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._SoftmaxOut(out, dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SolveHelper(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._SolveHelper(a, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_SparseAddmm(sparse *Tensor, dense *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseAddmm(sparse, dense, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseBroadcastTo(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseBroadcastTo(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _SparseCsrTensorUnsafe(crowIndices, colIndices, values, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseLogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseLogSoftmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseLogSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseLogSoftmaxInt(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseMaskHelper(t *Tensor, maskIndices *Tensor)(retVal *Tensor) { + + retVal, err 
:= _SparseMaskHelper(t, maskIndices) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor) { + + retVal, err := _SparseMm(sparse, dense) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSoftmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSoftmaxInt(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSparseMatmul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSparseMatmul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSum(del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSum(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSumBackward(grad, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSumDim(dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSumDim(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSumDimDtype(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSumDtype(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSumDtype(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Stack(tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _Stack(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_StackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _StackOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_StandardGamma(del bool)(retVal *Tensor) { + + retVal, err := ts._StandardGamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._StandardGammaGrad(output, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SymeigHelper(eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._SymeigHelper(eigenvectors, upper, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func Must_TestAmbiguousDefaults(dummy *Tensor, a int64, b int64)(retVal *Tensor) { + + retVal, err := _TestAmbiguousDefaults(dummy, a, b) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string)(retVal *Tensor) { + + retVal, err := _TestAmbiguousDefaultsB(dummy, a, b) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor) 
{ + + retVal, err := _TestOptionalFilledIntlist(values, addends) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor) { + + retVal, err := _TestOptionalIntlist(values, addends) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._TestSerializationSubcmul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_TestStringDefault(dummy *Tensor, a string, b string)(retVal *Tensor) { + + retVal, err := _TestStringDefault(dummy, a, b) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_TestWarnInAutograd(del bool)(retVal *Tensor) { + + retVal, err := ts._TestWarnInAutograd(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._ToCopy(optionsKind, optionsDevice, nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_TorchCudaCuLinkerSymbolOp(del bool)(retVal *Tensor) { + + retVal, err := ts._TorchCudaCuLinkerSymbolOp(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor) { + + retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Unique(sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts._Unique(sorted, returnInverse, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts._Unique2(sorted, returnInverse, returnCounts, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func Must_UnpackDual(dual *Tensor, level int64)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := _UnpackDual(dual, level) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) Must_UnsafeView(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._UnsafeView(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_UpsampleBicubic2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts._UpsampleBicubic2dAa(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_UpsampleBicubic2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := _UpsampleBicubic2dAaBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_UpsampleBicubic2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := _UpsampleBicubic2dAaBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, 
alignCorners, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleBicubic2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleBicubic2dAaOut(out, outputSize, alignCorners, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleBilinear2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleBilinear2dAa(outputSize, alignCorners, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleBilinear2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleBilinear2dAaBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleBilinear2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleBilinear2dAaBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleBilinear2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleBilinear2dAaOut(out, outputSize, alignCorners, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleNearestExact1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleNearestExact1d(outputSize, scales, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleNearestExact1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleNearestExact1dBackward(gradOutput, outputSize, inputSize, scales)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleNearestExact1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleNearestExact1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scales)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleNearestExact1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleNearestExact1dOut(out, outputSize, scales, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleNearestExact2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleNearestExact2d(outputSize, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleNearestExact2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleNearestExact2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleNearestExact2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleNearestExact2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleNearestExact2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleNearestExact2dOut(out, outputSize, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleNearestExact3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleNearestExact3d(outputSize, scalesD, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleNearestExact3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleNearestExact3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UpsampleNearestExact3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
+
+	retVal, err := _UpsampleNearestExact3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_UpsampleNearestExact3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts._UpsampleNearestExact3dOut(out, outputSize, scalesD, scalesH, scalesW, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64)(retVal bool) {
+
+	retVal, err := _UseCudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_UseCudnnRnnFlattenWeight()(retVal bool) {
+
+	retVal, err := _UseCudnnRnnFlattenWeight()
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_Values(del bool)(retVal *Tensor) {
+
+	retVal, err := ts._Values(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) Must_Version(del bool)(retVal int64) {
+
+	retVal, err := ts._Version(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_WeightNorm(v *Tensor, g *Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := _WeightNorm(v, g, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func Must_WeightNormCudaInterface(v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := _WeightNormCudaInterface(v, g, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func Must_WeightNormCudaInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := _WeightNormCudaInterfaceBackward(gradW, savedV, savedG, savedNorms, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func Must_WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := _WeightNormDifferentiableBackward(gradW, savedV, savedG, savedNorms, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAbs(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Abs(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAbs_()() {
+
+	err := ts.Abs_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAbsOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AbsOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAbsolute(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Absolute(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAbsolute_()() {
+
+	err := ts.Absolute_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAbsoluteOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AbsoluteOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAcos(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Acos(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAcos_()() {
+
+	err := ts.Acos_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAcosOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AcosOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAcosh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Acosh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAcosh_()() {
+
+	err := ts.Acosh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAcoshOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AcoshOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveAvgPool1d(outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveAvgPool2d(outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveAvgPool3d(outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveAvgPool3dBackward(gradInput, gradOutput, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool1d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.AdaptiveMaxPool1d(outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool2d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.AdaptiveMaxPool2d(outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveMaxPool2dBackwardGradInput(gradInput, gradOutput, indices, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.AdaptiveMaxPool2dOut(out, indices, outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool3d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.AdaptiveMaxPool3d(outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AdaptiveMaxPool3dBackwardGradInput(gradInput, gradOutput, indices, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.AdaptiveMaxPool3dOut(out, indices, outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAdd(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Add(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdd_(other *Tensor)() {
+
+	err := ts.Add_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddScalar_(other *Scalar)() {
+
+	err := ts.AddScalar_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Addbmm(batch1, batch2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor)() {
+
+	err := ts.Addbmm_(batch1, batch2)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddbmmOut(out, batch1, batch2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Addcdiv(tensor1, tensor2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor)() {
+
+	err := ts.Addcdiv_(tensor1, tensor2)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Addcmul(tensor1, tensor2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor)() {
+
+	err := ts.Addcmul_(tensor1, tensor2)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Addmm(mat1, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor)() {
+
+	err := ts.Addmm_(mat1, mat2)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddmmOut(out, mat1, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Addmv(mat, vec, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor)() {
+
+	err := ts.Addmv_(mat, vec)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddmvOut(out, mat, vec, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Addr(vec1, vec2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor)() {
+
+	err := ts.Addr_(vec1, vec2)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AddrOut(out, vec1, vec2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAdjoint(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Adjoint(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
+
+	retVal, err := AffineGridGenerator(theta, size, alignCorners)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
+
+	retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAlias(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Alias(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAlignAs(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AlignAs(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAll(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.All(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAllAllOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AllAllOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAllDim(dim int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AllDim(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AllOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAllclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool) {
+
+	retVal, err := ts.Allclose(other, rtol, atol, equalNan, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
+
+	retVal, err := AlphaDropout(input, p, train)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAlphaDropout_(p float64, train bool)() {
+
+	err := ts.AlphaDropout_(p, train)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Amax(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AmaxOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Amin(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AminOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAminmax(dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Aminmax(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.AminmaxOut(min, max, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustAngle(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Angle(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAngleOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AngleOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAny(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Any(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAnyAllOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AnyAllOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAnyDim(dim int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AnyDim(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AnyOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Arange(end, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustArangeOut(out *Tensor, end *Scalar)(retVal *Tensor) {
+
+	retVal, err := ArangeOut(out, end)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := ArangeStart(start, end, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustArangeStartOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) {
+
+	retVal, err := ArangeStartOut(out, start, end)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := ArangeStartStep(start, end, step, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArccos(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arccos(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArccos_()() {
+
+	err := ts.Arccos_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArccosOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArccosOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArccosh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arccosh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArccosh_()() {
+
+	err := ts.Arccosh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArccoshOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArccoshOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArcsin(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arcsin(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArcsin_()() {
+
+	err := ts.Arcsin_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArcsinOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArcsinOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArcsinh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arcsinh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArcsinh_()() {
+
+	err := ts.Arcsinh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArcsinhOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArcsinhOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArctan(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arctan(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArctan2(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arctan2(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArctan2_(other *Tensor)() {
+
+	err := ts.Arctan2_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArctan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arctan2Out(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArctan_()() {
+
+	err := ts.Arctan_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArctanOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArctanOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArctanh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Arctanh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArctanh_()() {
+
+	err := ts.Arctanh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustArctanhOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArctanhOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArgmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Argmax(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArgmaxOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArgmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Argmin(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArgminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ArgminOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArgsort(dim int64, descending bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Argsort(dim, descending, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustArgwhere(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Argwhere(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AsStrided(size, stride, storageOffset, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset []int64)() {
+
+	err := ts.AsStrided_(size, stride, storageOffset)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAsin(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Asin(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAsin_()() {
+
+	err := ts.Asin_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAsinOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AsinOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAsinh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Asinh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAsinh_()() {
+
+	err := ts.Asinh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAsinhOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AsinhOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtan(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atan(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtan2(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atan2(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtan2_(other *Tensor)() {
+
+	err := ts.Atan2_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atan2Out(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtan_()() {
+
+	err := ts.Atan_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAtanOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AtanOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtanh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atanh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtanh_()() {
+
+	err := ts.Atanh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustAtanhOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AtanhOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtleast1d(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atleast1d(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtleast2d(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atleast2d(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAtleast3d(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Atleast3d(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Baddbmm(batch1, batch2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor)() {
+
+	err := ts.Baddbmm_(batch1, batch2)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BaddbmmOut(out, batch1, batch2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := BartlettWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) {
+
+	retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor)(retVal *Tensor) {
+
+	retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu, count)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
+
+	retVal0, retVal1, retVal2, retVal3, err := BatchNormBackwardReduce(gradOut, input, mean, invstd, weight, inputG, weightG, biasG)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2, retVal3
+}
+
+func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) {
+
+	retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) {
+
+	retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := BatchNormGatherStats(input, mean, invstd, runningMean, runningVar, momentum, eps, count)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustBatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := BatchNormGatherStatsWithCounts(input, mean, invstd, runningMean, runningVar, momentum, eps, counts)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustBatchNormStats(input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := BatchNormStats(input, eps)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustBatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := BatchNormUpdateStats(input, runningMean, runningVar, momentum)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustBernoulli(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Bernoulli(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBernoulli_(p *Tensor)() {
+
+	err := ts.Bernoulli_(p)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBernoulliFloat_(p float64)() {
+
+	err := ts.BernoulliFloat_(p)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBernoulliOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BernoulliOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBernoulliP(p float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BernoulliP(p, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
+
+	retVal, err := Bilinear(input1, input2, weight, bias)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BinaryCrossEntropyBackwardGradInput(gradInput, gradOutput, target, weight, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Bincount(weights, minlength, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBinomial(count *Tensor, prob *Tensor)(retVal *Tensor) {
+
+	retVal, err := Binomial(count, prob)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseAnd(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseAnd(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseAnd_(other *Scalar)() {
+
+	err := ts.BitwiseAnd_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseAndScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseAndScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseAndTensor(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseAndTensor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseAndTensor_(other *Tensor)() {
+
+	err := ts.BitwiseAndTensor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseAndTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseAndTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseLeftShift(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseLeftShift(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseLeftShift_(other *Tensor)() {
+
+	err := ts.BitwiseLeftShift_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustBitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := BitwiseLeftShiftScalarTensor(selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseLeftShiftTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseLeftShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseLeftShiftTensorScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseLeftShiftTensorScalar_(other *Scalar)() {
+
+	err := ts.BitwiseLeftShiftTensorScalar_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseLeftShiftTensorScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseNot(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseNot(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseNot_()() {
+
+	err := ts.BitwiseNot_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseNotOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseOr(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseOr(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseOr_(other *Scalar)() {
+
+	err := ts.BitwiseOr_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseOrScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseOrScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseOrTensor(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseOrTensor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseOrTensor_(other *Tensor)() {
+
+	err := ts.BitwiseOrTensor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseOrTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseOrTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseRightShift(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseRightShift(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseRightShift_(other *Tensor)() {
+
+	err := ts.BitwiseRightShift_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustBitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := BitwiseRightShiftScalarTensor(selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseRightShiftTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseRightShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseRightShiftTensorScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseRightShiftTensorScalar_(other *Scalar)() {
+
+	err := ts.BitwiseRightShiftTensorScalar_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseRightShiftTensorScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseXor(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseXor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseXor_(other *Scalar)() {
+
+	err := ts.BitwiseXor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseXorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseXorScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseXorTensor(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseXorTensor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBitwiseXorTensor_(other *Tensor)() {
+
+	err := ts.BitwiseXorTensor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustBitwiseXorTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BitwiseXorTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := BlackmanWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBlockDiag(tensors []Tensor)(retVal *Tensor) {
+
+	retVal, err := BlockDiag(tensors)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBmm(mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Bmm(mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BmmOut(out, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBroadcastTo(size []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BroadcastTo(size, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBucketize(boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Bucketize(boundaries, outInt32, right, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustBucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor) {
+
+	retVal, err := BucketizeScalar(selfScalar, boundaries, outInt32, right)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustBucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.BucketizeTensorOut(out, boundaries, outInt32, right, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCanCast(from gotch.DType, to gotch.DType)(retVal bool) {
+
+	retVal, err := CanCast(from, to)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCartesianProd(tensors []Tensor)(retVal *Tensor) {
+
+	retVal, err := CartesianProd(tensors)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCat(tensors []Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := Cat(tensors, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := CatOut(out, tensors, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCauchy_(median float64, sigma float64)() {
+
+	err := ts.Cauchy_(median, sigma)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustCdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tensor) {
+
+	retVal, err := Cdist(x1, x2, p, computeMode)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCeil(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Ceil(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCeil_()() {
+
+	err := ts.Ceil_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustCeilOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CeilOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCelu(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Celu(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCelu_()() {
+
+	err := ts.Celu_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustChainMatmul(matrices []Tensor)(retVal *Tensor) {
+
+	retVal, err := ChainMatmul(matrices)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustChainMatmulOut(out *Tensor, matrices []Tensor)(retVal *Tensor) {
+
+	retVal, err := ChainMatmulOut(out, matrices)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustChannelShuffle(groups int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ChannelShuffle(groups, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCholesky(upper bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Cholesky(upper, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCholeskyInverse(upper bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CholeskyInverse(upper, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CholeskyInverseOut(out, upper, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CholeskyOut(out, upper, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CholeskySolve(input2, upper, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CholeskySolveOut(out, input2, upper, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ChooseQparamsOptimized(input, numel, nBins, ratio, bitWidth)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Clamp(min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClamp_(min *Scalar, max *Scalar)() {
+
+	err := ts.Clamp_(min, max)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClampMax(max *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMax(max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMax_(max *Scalar)() {
+
+	err := ts.ClampMax_(max)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMaxOut(out, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMaxTensor(max *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMaxTensor(max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMaxTensor_(max *Tensor)() {
+
+	err := ts.ClampMaxTensor_(max)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClampMaxTensorOut(out *Tensor, max *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMaxTensorOut(out, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMin(min *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMin(min, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMin_(min *Scalar)() {
+
+	err := ts.ClampMin_(min)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClampMinOut(out *Tensor, min *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMinOut(out, min, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMinTensor(min *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMinTensor(min, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampMinTensor_(min *Tensor)() {
+
+	err := ts.ClampMinTensor_(min)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClampMinTensorOut(out *Tensor, min *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampMinTensorOut(out, min, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampOut(out, min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampTensor(min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClampTensor_(min *Tensor, max *Tensor)() {
+
+	err := ts.ClampTensor_(min, max)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClampTensorOut(out, min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClip(min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Clip(min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClip_(min *Scalar, max *Scalar)() {
+
+	err := ts.Clip_(min, max)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClipOut(out, min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClipTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClipTensor(min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustClipTensor_(min *Tensor, max *Tensor)() {
+
+	err := ts.ClipTensor_(min, max)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ClipTensorOut(out, min, max, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCoalesce(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Coalesce(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCol2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) {
+
+	retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCol2imBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) {
+
+	retVal, err := Col2imBackwardGradInput(gradInput, gradOutput, kernelSize, dilation, padding, stride)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustColIndices(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ColIndices(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustColumnStack(tensors []Tensor)(retVal *Tensor) {
+
+	retVal, err := ColumnStack(tensors)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustColumnStackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
+
+	retVal, err := ColumnStackOut(out, tensors)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Combinations(r, withReplacement, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustComplex(real *Tensor, imag *Tensor)(retVal *Tensor) {
+
+	retVal, err := Complex(real, imag)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor) {
+
+	retVal, err := ComplexOut(out, real, imag)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConcat(tensors []Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := Concat(tensors, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConcatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := ConcatOut(out, tensors, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConj(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Conj(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConjPhysical(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ConjPhysical(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConjPhysical_()() {
+
+	err := ts.ConjPhysical_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ConjPhysicalOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConstantPadNd(pad []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ConstantPadNd(pad, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustContiguous(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Contiguous(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Conv1dPadding(input, weight, bias, stride, padding, dilation, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Conv2dPadding(input, weight, bias, stride, padding, dilation, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Conv3dPadding(input, weight, bias, stride, padding, dilation, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ConvDepthwise3d(weight, kernelSize, bias, stride, padding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ConvTbc(weight, bias, pad, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
+
+	retVal0, retVal1, retVal2, err := ts.ConvTbcBackward(input, weight, bias, pad, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2
+}
+
+func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
+
+	retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
+
+	retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
+
+	retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConvolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
+
+	retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool)() {
+
+	err := ts.CopySparseToSparse_(src, nonBlocking)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustCopysign(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Copysign(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCopysign_(other *Tensor)() {
+
+	err := ts.Copysign_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustCopysignOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CopysignOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCopysignScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CopysignScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCopysignScalar_(other *Scalar)() {
+
+	err := ts.CopysignScalar_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustCopysignScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CopysignScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCorrcoef(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Corrcoef(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCos(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Cos(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCos_()() {
+
+	err := ts.Cos_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustCosOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CosOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCosh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Cosh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCosh_()() {
+
+	err := ts.Cosh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustCoshOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CoshOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) {
+
+	retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Tensor) {
+
+	retVal, err := CosineSimilarity(x1, x2, dim, eps)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCountNonzero(dim []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CountNonzero(dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCountNonzeroDimIntlist(dim []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CountNonzeroDimIntlist(dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCov(correction int64, fweights *Tensor, aweights *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Cov(correction, fweights, aweights, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCross(other *Tensor, dim []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Cross(other, dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CrossEntropyLoss(target, weight, reduction, ignoreIndex, labelSmoothing, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CrossOut(out, other, dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCrowIndices(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CrowIndices(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) {
+
+	retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) {
+
+	retVal, err := CtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
+
+	retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
+
+	retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustCudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
+
+	retVal0, retVal1, retVal2, retVal3, err := CudnnBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2, retVal3
+}
+
+func MustCudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
+
+	retVal0, retVal1, retVal2, err := CudnnBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon, reserveSpace)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2
+}
+
+func(ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CudnnConvolutionAddRelu(weight, z, alpha, bias, stride, padding, dilation, groups, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CudnnConvolutionRelu(weight, bias, stride, padding, dilation, groups, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.CudnnGridSampler(grid, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.CudnnGridSamplerBackward(grid, gradOutput, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustCudnnIsAcceptable(del bool)(retVal bool) {
+
+	retVal, err := ts.CudnnIsAcceptable(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCummax(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Cummax(dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustCummaxOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.CummaxOut(values, indices, dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustCummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := CummaxminBackward(grad, input, indices, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustCummin(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Cummin(dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustCumminOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor,
retVal1 *Tensor) { + + retVal0, retVal1, err := ts.CumminOut(values, indices, dim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Cumprod(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumprod_(dim int64, dtype gotch.DType)() { + + err := ts.Cumprod_(dim, dtype) + if err != nil { log.Fatal(err) } + + return +} + +func MustCumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor)(retVal *Tensor) { + + retVal, err := CumprodBackward(grad, input, dim, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.CumprodOut(out, dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Cumsum(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumsum_(dim int64, dtype gotch.DType)() { + + err := ts.Cumsum_(dim, dtype) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustCumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.CumsumOut(out, dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCumulativeTrapezoid(y *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := CumulativeTrapezoid(y, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := CumulativeTrapezoidX(y, x, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustData(del bool)(retVal *Tensor) { + + retVal, err := ts.Data(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDeg2rad(del bool)(retVal *Tensor) { + + retVal, err := ts.Deg2rad(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDeg2rad_()() { + + err := ts.Deg2rad_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDeg2radOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Deg2radOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDenseDim(del bool)(retVal int64) { + + retVal, err := ts.DenseDim(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDequantize(del bool)(retVal *Tensor) { + + retVal, err := ts.Dequantize(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDet(del bool)(retVal *Tensor) { + + retVal, err := ts.Det(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDetach(del bool)(retVal *Tensor) { + + retVal, err := ts.Detach(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDetach_()() { + + err := ts.Detach_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDiag(diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Diag(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDiagBackward(grad *Tensor, inputSizes []int64, diagonal int64)(retVal *Tensor) { + + retVal, err := DiagBackward(grad, inputSizes, diagonal) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.DiagOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagflat(offset int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Diagflat(offset, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Diagonal(offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) { + + retVal, err := DiagonalBackward(gradOutput, inputSizes, offset, dim1, dim2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagonalScatter(src *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.DiagonalScatter(src, offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Diff(n, dim, prepend, append, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DiffOut(out, n, dim, prepend, append, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDigamma(del bool)(retVal *Tensor) { + + retVal, err := ts.Digamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDigamma_()() { + + err := ts.Digamma_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDigammaOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DigammaOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDist(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Dist(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiv(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Div(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiv_(other *Tensor)() { + + err := ts.Div_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DivOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor) { + + retVal, err := ts.DivOutMode(out, other, roundingMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.DivScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivScalar_(other *Scalar)() { + + err := ts.DivScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor) { + + retVal, err := ts.DivScalarMode(other, roundingMode, del) + if err != nil 
{ log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivScalarMode_(other *Scalar, roundingMode string)() { + + err := ts.DivScalarMode_(other, roundingMode) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor) { + + retVal, err := ts.DivTensorMode(other, roundingMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivTensorMode_(other *Tensor, roundingMode string)() { + + err := ts.DivTensorMode_(other, roundingMode) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivide(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Divide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivide_(other *Tensor)() { + + err := ts.Divide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor) { + + retVal, err := ts.DivideOutMode(out, other, roundingMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivideScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.DivideScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivideScalar_(other *Scalar)() { + + err := ts.DivideScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivideScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor) { + + retVal, err := ts.DivideScalarMode(other, roundingMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivideScalarMode_(other *Scalar, roundingMode string)() { + + err := ts.DivideScalarMode_(other, roundingMode) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivideTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor) { + + retVal, err := ts.DivideTensorMode(other, roundingMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivideTensorMode_(other *Tensor, roundingMode string)() { + + err := ts.DivideTensorMode_(other, roundingMode) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDot(tensor *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Dot(tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DotOut(out, tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := Dropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDropout_(p float64, train bool)() { + + err := ts.Dropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func MustDstack(tensors []Tensor)(retVal *Tensor) { + + retVal, err := Dstack(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := DstackOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEig(eigenvectors bool, del bool)(retVal0 
*Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Eig(eigenvectors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustEigE(e *Tensor, v *Tensor, eigenvectors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.EigE(e, v, eigenvectors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustEinsum(equation string, tensors []Tensor)(retVal *Tensor) { + + retVal, err := Einsum(equation, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustElu(del bool)(retVal *Tensor) { + + retVal, err := ts.Elu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustElu_()() { + + err := ts.Elu_() + if err != nil { log.Fatal(err) } + + return +} + +func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor) { + + retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, isResult, selfOrResult) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor) { + + retVal, err := EluBackwardGradInput(gradInput, gradOutput, alpha, scale, inputScale, isResult, selfOrResult) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEluOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.EluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) { + + retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) { + + retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, err := EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3 +} + +func MustEmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, err := EmbeddingBagPaddingIdx(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3 +} + +func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) { + + retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64)() { + + err := ts.EmbeddingRenorm_(indices, maxNorm, normType) + if err != nil { log.Fatal(err) } + + return +} + +func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) { + + retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Empty(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEmptyLike(del bool)(retVal *Tensor) { + + retVal, err := ts.EmptyLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := EmptyOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := EmptyQuantized(size, qtensor, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEq(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Eq(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEq_(other *Scalar)() { + + err := ts.Eq_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustEqScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.EqScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEqTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.EqTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEqTensor_(other *Tensor)() { + + err := ts.EqTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustEqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.EqTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEqual(other *Tensor, del bool)(retVal bool) { + + retVal, err := ts.Equal(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErf(del bool)(retVal *Tensor) { + + retVal, err := ts.Erf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErf_()() { + + err := ts.Erf_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustErfOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ErfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfc(del bool)(retVal *Tensor) { + + retVal, err := ts.Erfc(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfc_()() { + + err := ts.Erfc_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustErfcOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ErfcOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustErfinv(del bool)(retVal *Tensor) { + + retVal, err := ts.Erfinv(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfinv_()() { + + err := ts.Erfinv_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustErfinvOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ErfinvOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp(del bool)(retVal *Tensor) { + + retVal, err := ts.Exp(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp2(del bool)(retVal *Tensor) { + + retVal, err := ts.Exp2(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp2_()() { + + err := ts.Exp2_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustExp2Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Exp2Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp_()() { + + err := ts.Exp_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustExpOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ExpOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpand(size []int64, implicit bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Expand(size, implicit, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpandAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ExpandAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpm1(del bool)(retVal *Tensor) { + + retVal, err := ts.Expm1(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpm1_()() { + + err := ts.Expm1_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustExpm1Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Expm1Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExponential_(lambd float64)() { + + err := ts.Exponential_(lambd) + if err != nil { log.Fatal(err) } + + return +} + +func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Eye(n, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := EyeM(n, m, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeMOut(out *Tensor, n int64, m int64)(retVal *Tensor) { + + retVal, err := EyeMOut(out, n, m) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeOut(out *Tensor, n int64)(retVal *Tensor) { + + retVal, err := EyeOut(out, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FakeQuantizePerChannelAffineCachemask(scale, zeroPoint, axis, quantMin, quantMax, del) + if err != 
nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustFakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor) { + + retVal, err := FakeQuantizePerChannelAffineCachemaskBackward(grad, mask) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FakeQuantizePerTensorAffineCachemask(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustFakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor) { + + retVal, err := FakeQuantizePerTensorAffineCachemaskBackward(grad, mask) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffineTensorQparams(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmPackGemmMatrixFp16(input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix(input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64)(retVal *Tensor) { + + retVal, err := FbgemmPackQuantizedMatrixKn(input, k, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := FeatureAlphaDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool)() { + + err := 
ts.FeatureAlphaDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func MustFeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := FeatureDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFeatureDropout_(p float64, train bool)() { + + err := ts.FeatureDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFftFft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFft2(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFft2Out(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFftOut(out, n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := FftFftfreq(n, d, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFftFftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor) { + + retVal, err := FftFftfreqOut(out, n, d) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFftnOut(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFftshift(dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFftshift(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfft2(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfft2Out(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfftOut(out, n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfftnOut(out, s, dim, norm, del) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfft2(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfft2Out(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfftOut(out, n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfftnOut(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfftshift(dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfftshift(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfft2(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfft2Out(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfftOut(out, n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfftnOut(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfft2(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfft2Out(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) { 
+ + retVal, err := ts.FftIrfftOut(out, n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfftnOut(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfft2(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfft2Out(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfftOut(out, n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := FftRfftfreq(n, d, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFftRfftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor) { + + retVal, err := FftRfftfreqOut(out, n, d) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfftnOut(out, s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFill_(value *Scalar)() { + + err := ts.Fill_(value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool)() { + + err := ts.FillDiagonal_(fillValue, wrap) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFillTensor_(value *Tensor)() { + + err := ts.FillTensor_(value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFix(del bool)(retVal *Tensor) { + + retVal, err := ts.Fix(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFix_()() { + + err := ts.Fix_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFixOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FixOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Flatten(startDim, endDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFlattenDenseTensors(tensors []Tensor)(retVal *Tensor) { + + retVal, err := FlattenDenseTensors(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFlip(dims []int64, del bool)(retVal *Tensor) { + + retVal, err 
:= ts.Flip(dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFliplr(del bool)(retVal *Tensor) { + + retVal, err := ts.Fliplr(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFlipud(del bool)(retVal *Tensor) { + + retVal, err := ts.Flipud(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloatPower(exponent *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloatPower(exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloatPower_(exponent *Scalar)() { + + err := ts.FloatPower_(exponent) + if err != nil { log.Fatal(err) } + + return +} + +func MustFloatPowerScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) { + + retVal, err := FloatPowerScalar(selfScalar, exponent) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) { + + retVal, err := FloatPowerScalarOut(out, selfScalar, exponent) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloatPowerTensor_(exponent *Tensor)() { + + err := ts.FloatPowerTensor_(exponent) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloatPowerTensorScalar(exponent *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FloatPowerTensorScalar(exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FloatPowerTensorScalarOut(out, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloatPowerTensorTensorOut(out, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloor(del bool)(retVal *Tensor) { + + retVal, err := ts.Floor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloor_()() { + + err := ts.Floor_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloorDivide(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorDivide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloorDivide_(other *Tensor)() { + + err := ts.FloorDivide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorDivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloorDivideScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorDivideScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloorDivideScalar_(other *Scalar)() { + + err := ts.FloorDivideScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloorOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmax(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Fmax(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FmaxOut(out, other, del) 
+ if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmin(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Fmin(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFminOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FminOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmod(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Fmod(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmod_(other *Scalar)() { + + err := ts.Fmod_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFmodScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FmodScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmodTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FmodTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmodTensor_(other *Tensor)() { + + err := ts.FmodTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFmodTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FmodTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrac(del bool)(retVal *Tensor) { + + retVal, err := ts.Frac(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrac_()() { + + err := ts.Frac_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFracOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FracOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FractionalMaxPool2d(kernelSize, outputSize, randomSamples, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FractionalMaxPool2dOutput(output, indices, kernelSize, outputSize, randomSamples, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustFractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FractionalMaxPool3d(kernelSize, outputSize, randomSamples, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) 
MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FractionalMaxPool3dOutput(output, indices, kernelSize, outputSize, randomSamples, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustFrexp(del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Frexp(del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustFrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.FrexpTensorOut(mantissa, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustFrobeniusNorm(del bool)(retVal *Tensor) { + + retVal, err := ts.FrobeniusNorm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrobeniusNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.FrobeniusNormDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Full(size, fillValue, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFullLike(fillValue *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FullLike(fillValue, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor) { + + retVal, err := FullOut(out, size, fillValue) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal *Tensor) { + + retVal, err := ts.FusedMovingAvgObsFakeQuant(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustGather(dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Gather(dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.GatherBackward(grad, dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGcd(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Gcd(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGcd_(other *Tensor)() { + + err := ts.Gcd_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGcdOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GcdOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGe(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Ge(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGe_(other *Scalar)() { + + err := ts.Ge_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GeScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeTensor_(other *Tensor)() { + + err := ts.GeTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGelu(del bool)(retVal *Tensor) { + + retVal, err := ts.Gelu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeluBackward(grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeluBackward(grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeluBackwardGradInput(gradInput *Tensor, grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeluBackwardGradInput(gradInput, grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeluOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeometric_(p float64)() { + + err := ts.Geometric_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGeqrf(del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Geqrf(del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustGeqrfA(a *Tensor, tau *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.GeqrfA(a, tau, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustGer(vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Ger(vec2, del) + if err != nil { log.Fatal(err) } + 
+ return retVal +} + +func(ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GerOut(out, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGlu(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Glu(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.GluBackward(gradOutput, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.GluBackwardGradInput(gradInput, gradOutput, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.GluOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGrad(del bool)(retVal *Tensor) { + + retVal, err := ts.Grad(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreater(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Greater(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreater_(other *Scalar)() { + + err := ts.Greater_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterEqual(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqual(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterEqual_(other *Scalar)() { + + err := ts.GreaterEqual_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqualScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterEqualTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqualTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterEqualTensor_(other *Tensor)() { + + err := ts.GreaterEqualTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqualTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterTensor_(other *Tensor)() { + + err := ts.GreaterTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil 
{ log.Fatal(err) } + + return retVal +} + +func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler3dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := GridSampler3dBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGru(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := Gru(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) { + + retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := GruData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustGt(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Gt(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGt_(other *Scalar)() { + + err := ts.Gt_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GtScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGtTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GtTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGtTensor_(other *Tensor)() { + + err := ts.GtTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GtTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindowPeriodic(windowLength int64, periodic bool, 
optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindowPeriodicAlpha(windowLength, periodic, alpha, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindowPeriodicAlphaBeta(windowLength, periodic, alpha, beta, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HannWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HannWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardshrink(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardshrink(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardshrinkBackward(gradOut, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardshrinkBackwardGradInput(gradInput, gradOut, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardshrinkOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardshrinkOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoid(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardsigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoid_()() { + + err := ts.Hardsigmoid_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardsigmoidBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardsigmoidBackwardGradInput(gradInput, gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardsigmoidOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardswish(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardswish(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardswish_()() { + + err := ts.Hardswish_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := 
ts.HardswishBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardswishOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardswishOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanh(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardtanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanh_()() { + + err := ts.Hardtanh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardtanhBackwardGradInput(gradInput, gradOutput, minVal, maxVal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardtanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHeaviside(values *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Heaviside(values, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHeaviside_(values *Tensor)() { + + err := ts.Heaviside_(values) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HeavisideOut(out, values, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHistc(bins int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Histc(bins, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor) { + + retVal, err := ts.HistcOut(out, bins, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) { + + retVal, err := Hspmm(mat1, mat2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) { + + retVal, err := HspmmOut(out, mat1, mat2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHstack(tensors []Tensor)(retVal *Tensor) { + + retVal, err := Hstack(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := HstackOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHuberLoss(target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.HuberLoss(target, reduction, delta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.HuberLossBackward(gradOutput, target, reduction, delta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
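// NOTE (usage sketch, not part of the generated bindings): every generated
// method in this file takes a trailing `del bool`. When true, the receiver
// tensor is dropped (its libtorch memory freed) right after the call, which
// keeps method chains from leaking C-side memory. A minimal sketch using the
// MustMatmul and MustLog1p wrappers from this file, and assuming a generated
// MustOnes constructor with the usual (size, optionsKind, optionsDevice)
// signature plus the hand-written MustDrop helper:
//
//	a := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
//	b := ts.MustOnes([]int64{3, 2}, gotch.Float, gotch.CPU)
//	c := a.MustMatmul(b, true) // del=true: `a` is dropped after the matmul
//	d := c.MustLog1p(true)     // del=true: `c` is dropped after the log1p
//	b.MustDrop()               // other operands must be dropped by hand
//	d.MustDrop()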
+func(ts *Tensor) MustHuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.HuberLossBackwardOut(gradInput, gradOutput, target, reduction, delta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.HuberLossOut(out, target, reduction, delta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHypot(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Hypot(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHypot_(other *Tensor)() { + + err := ts.Hypot_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HypotOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustI0(del bool)(retVal *Tensor) { + + retVal, err := ts.I0(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustI0_()() { + + err := ts.I0_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustI0Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.I0Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIgamma(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Igamma(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIgamma_(other *Tensor)() { + + err := ts.Igamma_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIgammaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IgammaOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIgammac(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Igammac(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIgammac_(other *Tensor)() { + + err := ts.Igammac_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIgammacOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IgammacOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIm2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIm2colBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := Im2colBackwardGradInput(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := 
ts.Im2colOut(out, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustImag(del bool)(retVal *Tensor) { + + retVal, err := ts.Imag(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexAdd(dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor)() { + + err := ts.IndexAdd_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexAddOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexAddOut(out, dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexCopy(dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor)() { + + err := ts.IndexCopy_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexFill(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar)() { + + err := ts.IndexFill_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexFillIntTensor(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)() { + + err := ts.IndexFillIntTensor_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexSelect(dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor)(retVal *Tensor) { + + retVal, err := IndexSelectBackward(grad, selfSizes, dim, index) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexSelectOut(out, dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndices(del bool)(retVal *Tensor) { + + retVal, err := ts.Indices(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.InfinitelyDifferentiableGeluBackward(grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInner(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Inner(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInnerOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.InnerOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustInstanceNorm(input *Tensor, weight *Tensor, bias 
*Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIntRepr(del bool)(retVal *Tensor) { + + retVal, err := ts.IntRepr(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInverse(del bool)(retVal *Tensor) { + + retVal, err := ts.Inverse(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInverseOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.InverseOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsCoalesced(del bool)(retVal bool) { + + retVal, err := ts.IsCoalesced(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsComplex(del bool)(retVal bool) { + + retVal, err := ts.IsComplex(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsConj(del bool)(retVal bool) { + + retVal, err := ts.IsConj(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsDistributed(del bool)(retVal bool) { + + retVal, err := ts.IsDistributed(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsFloatingPoint(del bool)(retVal bool) { + + retVal, err := ts.IsFloatingPoint(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsInference(del bool)(retVal bool) { + + retVal, err := ts.IsInference(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsLeaf(del bool)(retVal bool) { + + retVal, err := ts.IsLeaf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsNeg(del bool)(retVal bool) { + + retVal, err := ts.IsNeg(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsNonzero(del bool)(retVal bool) { + + retVal, err := ts.IsNonzero(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsPinned(device gotch.Device, del bool)(retVal bool) { + + retVal, err := ts.IsPinned(device, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsSameSize(other *Tensor, del bool)(retVal bool) { + + retVal, err := ts.IsSameSize(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsSetTo(tensor *Tensor, del bool)(retVal bool) { + + retVal, err := ts.IsSetTo(tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsSigned(del bool)(retVal bool) { + + retVal, err := ts.IsSigned(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsVulkanAvailable()(retVal bool) { + + retVal, err := IsVulkanAvailable() + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Isclose(other, rtol, atol, equalNan, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsfinite(del bool)(retVal *Tensor) { + + retVal, err := ts.Isfinite(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) { + + retVal, err := Isin(elements, testElements, assumeUnique, 
invert) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) { + + retVal, err := IsinScalarTensor(element, testElements, assumeUnique, invert) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) { + + retVal, err := IsinScalarTensorOut(out, element, testElements, assumeUnique, invert) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor) { + + retVal, err := IsinTensorScalar(elements, testElement, assumeUnique, invert) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor) { + + retVal, err := IsinTensorScalarOut(out, elements, testElement, assumeUnique, invert) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) { + + retVal, err := IsinTensorTensorOut(out, elements, testElements, assumeUnique, invert) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsinf(del bool)(retVal *Tensor) { + + retVal, err := ts.Isinf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsnan(del bool)(retVal *Tensor) { + + retVal, err := ts.Isnan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsneginf(del bool)(retVal *Tensor) { + + retVal, err := ts.Isneginf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsneginfOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IsneginfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsposinf(del bool)(retVal *Tensor) { + + retVal, err := ts.Isposinf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsposinfOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IsposinfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsreal(del bool)(retVal *Tensor) { + + retVal, err := ts.Isreal(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIstft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Istft(nFft, hopLength, winLength, window, center, normalized, onesided, length, returnComplex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustKaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := KaiserWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustKaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := KaiserWindowBeta(windowLength, periodic, beta, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustKaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice 
gotch.Device)(retVal *Tensor) { + + retVal, err := KaiserWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKlDiv(target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) { + + retVal, err := ts.KlDiv(target, reduction, logTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) { + + retVal, err := ts.KlDivBackward(gradOutput, target, reduction, logTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKron(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Kron(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKronOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.KronOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKthvalue(k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Kthvalue(k, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustKthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.KthvalueValues(values, indices, k, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1Loss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1LossBackwardGradInput(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1LossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool)(retVal *Tensor) { + + retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLcm(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Lcm(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLcm_(other *Tensor)() { + + err := ts.Lcm_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LcmOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLdexp(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Ldexp(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLdexp_(other *Tensor)() { + + err := 
ts.Ldexp_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLdexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LdexpOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLe(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Le(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLe_(other *Scalar)() { + + err := ts.Le_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LeScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LeTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeTensor_(other *Tensor)() { + + err := ts.LeTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LeTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyRelu(del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyRelu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyRelu_()() { + + err := ts.LeakyRelu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyReluBackwardGradInput(gradInput, gradOutput, negativeSlope, selfIsResult, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyReluOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyReluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Lerp(end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerp_(end *Tensor, weight *Scalar)() { + + err := ts.Lerp_(end, weight) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LerpScalarOut(out, end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerpTensor(end *Tensor, weight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LerpTensor(end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerpTensor_(end *Tensor, weight *Tensor)() { + + err := ts.LerpTensor_(end, weight) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LerpTensorOut(out, end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLess(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Less(other, del) + if err != 
nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLess_(other *Scalar)() { + + err := ts.Less_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessEqual(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqual(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessEqual_(other *Scalar)() { + + err := ts.LessEqual_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqualScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessEqualTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqualTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessEqualTensor_(other *Tensor)() { + + err := ts.LessEqualTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqualTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LessScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessTensor_(other *Tensor)() { + + err := ts.LessTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLgamma(del bool)(retVal *Tensor) { + + retVal, err := ts.Lgamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLgamma_()() { + + err := ts.Lgamma_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLgammaOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LgammaOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCholesky(upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCholesky(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCholeskyEx(upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgCholeskyEx(upper, checkErrors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgCholeskyExL(l, info, upper, checkErrors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCholeskyOut(out, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCond(p *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCond(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCondOut(out 
*Tensor, p *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCondOut(out, p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCondPStr(p string, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCondPStr(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCondPStrOut(out *Tensor, p string, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCondPStrOut(out, p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCross(other *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCross(other, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgCrossOut(out *Tensor, other *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgCrossOut(out, other, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgDet(del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgDet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgDetOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgDetOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgDiagonal(a *Tensor, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) { + + retVal, err := LinalgDiagonal(a, offset, dim1, dim2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgEig(del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgEig(del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgEigOut(eigenvalues, eigenvectors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgEigh(uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgEigh(uPLO, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgEighEigvals(eigvals, eigvecs, uPLO, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgEigvals(del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgEigvals(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgEigvalsOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgEigvalsOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgEigvalsh(uPLO string, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgEigvalsh(uPLO, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgEigvalshOut(out *Tensor, uPLO string, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgEigvalshOut(out, uPLO, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgHouseholderProduct(input *Tensor, tau *Tensor)(retVal *Tensor) { + + retVal, err := LinalgHouseholderProduct(input, tau) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor)(retVal *Tensor) { + + retVal, err := LinalgHouseholderProductOut(out, input, tau) + if 
err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgInv(del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgInv(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgInvEx(checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgInvEx(checkErrors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgInvExInverse(inverse *Tensor, info *Tensor, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgInvExInverse(inverse, info, checkErrors, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgInvOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgInvOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgLstsq(b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsq(b, rcond, driver, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3 +} + +func(ts *Tensor) MustLinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsqOut(solution, residuals, rank, singularValues, b, rcond, driver, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3 +} + +func MustLinalgLuFactor(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := LinalgLuFactor(a, pivot) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustLinalgLuFactorEx(a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := LinalgLuFactorEx(a, pivot, checkErrors) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustLinalgLuFactorExOut(lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := LinalgLuFactorExOut(lU, pivots, info, a, pivot, checkErrors) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustLinalgLuFactorOut(lU *Tensor, pivots *Tensor, a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := LinalgLuFactorOut(lU, pivots, a, pivot) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgMatmul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatmul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatmulOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixExp(del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixExp(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixPower(n int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixPower(n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixPowerOut(out *Tensor, 
n int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixPowerOut(out, n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixRank(tol float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixRank(tol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixRankAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixRankAtolRtolFloat(atol, rtol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixRankAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixRankAtolRtolFloatOut(out, atol, rtol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgMatrixRankAtolRtolTensor(input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor) { + + retVal, err := LinalgMatrixRankAtolRtolTensor(input, atol, rtol, hermitian) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgMatrixRankAtolRtolTensorOut(out *Tensor, input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor) { + + retVal, err := LinalgMatrixRankAtolRtolTensorOut(out, input, atol, rtol, hermitian) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgMatrixRankOut(out *Tensor, tol float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgMatrixRankOut(out, tol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor) { + + retVal, err := LinalgMatrixRankOutTolTensor(out, input, tol, hermitian) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor) { + + retVal, err := LinalgMatrixRankTolTensor(input, tol, hermitian) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgMultiDot(tensors []Tensor)(retVal *Tensor) { + + retVal, err := LinalgMultiDot(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgMultiDotOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := LinalgMultiDotOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNorm(ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNormOrdStr(ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNormOrdStrOut(out, ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNormOut(out, ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
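// NOTE (usage sketch, not part of the generated bindings): wrappers generated
// from ops with multiple outputs, such as MustLinalgCholeskyEx above, return
// one value per output tensor. A minimal sketch, assuming a generated MustEye
// constructor with the usual (n, optionsKind, optionsDevice) signature and
// the hand-written Print/MustDrop helpers:
//
//	x := ts.MustEye(3, gotch.Double, gotch.CPU)
//	// upper=false -> lower-triangular factor; checkErrors=false defers the
//	// error check to the returned `info` tensor; del=true drops x afterwards.
//	l, info := x.MustLinalgCholeskyEx(false, false, true)
//	info.Print() // a zero entry means the factorization succeeded
//	l.MustDrop()
//	info.MustDrop()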
+func(ts *Tensor) MustLinalgPinv(rcond float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinv(rcond, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvAtolRtolFloat(atol, rtol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvAtolRtolFloatOut(out, atol, rtol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvAtolRtolTensor(atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvAtolRtolTensor(atol, rtol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvAtolRtolTensorOut(out *Tensor, atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvAtolRtolTensorOut(out, atol, rtol, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvOut(out, rcond, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvOutRcondTensor(out, rcond, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgPinvRcondTensor(rcond, hermitian, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgQr(mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgQr(mode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgQrOut(q *Tensor, r *Tensor, mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgQrOut(q, r, mode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgSlogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgSlogdet(del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustLinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.LinalgSlogdetOut(sign, logabsdet, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustLinalgSolve(input *Tensor, other *Tensor)(retVal *Tensor) { + + retVal, err := LinalgSolve(input, other) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgSolveOut(out *Tensor, input *Tensor, other *Tensor)(retVal *Tensor) { + + retVal, err := LinalgSolveOut(out, input, other) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgSolveTriangular(b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgSolveTriangular(b, upper, left, unitriangular, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustLinalgSolveTriangularOut(out *Tensor, b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgSolveTriangularOut(out, b, upper, left, unitriangular, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgSvd(a *Tensor, fullMatrices bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := LinalgSvd(a, fullMatrices) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustLinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := LinalgSvdU(u, s, vh, a, fullMatrices) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustLinalgSvdvals(a *Tensor)(retVal *Tensor) { + + retVal, err := LinalgSvdvals(a) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinalgSvdvalsOut(out *Tensor, a *Tensor)(retVal *Tensor) { + + retVal, err := LinalgSvdvalsOut(out, a) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgTensorinv(ind int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgTensorinv(ind, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgTensorinvOut(out *Tensor, ind int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgTensorinvOut(out, ind, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgTensorsolve(other *Tensor, dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgTensorsolve(other, dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgTensorsolveOut(out, other, dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := Linear(input, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := LinearOut(out, input, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64)(retVal *Tensor) { + + retVal, err := LinspaceOut(out, start, end, steps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog(del bool)(retVal *Tensor) { + + retVal, err := ts.Log(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog10(del bool)(retVal *Tensor) { + + retVal, err := ts.Log10(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog10_()() { + + err := ts.Log10_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLog10Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Log10Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog1p(del bool)(retVal *Tensor) { + + retVal, err := ts.Log1p(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
*Tensor) MustLog1p_()() {
+
+	err := ts.Log1p_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLog1pOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Log1pOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLog2(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Log2(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLog2_()() {
+
+	err := ts.Log2_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLog2Out(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Log2Out(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLog_()() {
+
+	err := ts.Log_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogNormal_(mean float64, std float64)() {
+
+	err := ts.LogNormal_(mean, std)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogSigmoid(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogSigmoid(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogSigmoidBackwardGradInput(gradInput, gradOutput, buffer, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogSigmoidOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogSoftmax(dim, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogaddexp(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logaddexp(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogaddexp2(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logaddexp2(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logaddexp2Out(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogaddexpOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogcumsumexp(dim int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logcumsumexp(dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogcumsumexpOut(out, dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogdet(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logdet(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalAnd(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalAnd(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalAnd_(other *Tensor)() {
+
+	err := ts.LogicalAnd_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalAndOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalNot(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalNot(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalNot_()() {
+
+	err := ts.LogicalNot_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogicalNotOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalNotOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalOr(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalOr(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalOr_(other *Tensor)() {
+
+	err := ts.LogicalOr_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalOrOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalXor(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalXor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogicalXor_(other *Tensor)() {
+
+	err := ts.LogicalXor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogicalXorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogit(eps []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logit(eps, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogit_(eps []float64)() {
+
+	err := ts.Logit_(eps)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLogitBackward(gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogitBackward(gradOutput, eps, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogitBackwardGradInput(gradInput, gradOutput, eps, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogitOut(out, eps, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustLogspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64)(retVal *Tensor) {
+
+	retVal, err := LogspaceOut(out, start, end, steps, base)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Logsumexp(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LogsumexpOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustLstm(input *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
+
+	retVal0, retVal1, retVal2, err := Lstm(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2
+}
+
+func MustLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := LstmCell(input, hx, wIh, wHh, bIh, bHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustLstmData(data *Tensor, batchSizes *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
+
+	retVal0, retVal1, retVal2, err := LstmData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2
+}
+
+func(ts *Tensor) MustLstsq(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Lstsq(a, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustLstsqX(x *Tensor, qr *Tensor, a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.LstsqX(x, qr, a, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustLt(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Lt(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLt_(other *Scalar)() {
+
+	err := ts.Lt_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LtScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLtTensor(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LtTensor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLtTensor_(other *Tensor)() {
+
+	err := ts.LtTensor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustLtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LtTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LuSolve(lUData, lUPivots, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustLuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustLuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
+
+	retVal0, retVal1, retVal2, err := LuUnpack(lUData, lUPivots, unpackData, unpackPivots)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2
+}
+
+func MustLuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
+
+	retVal0, retVal1, retVal2, err := LuUnpackOut(p, l, u, lUData, lUPivots, unpackData, unpackPivots)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1, retVal2
+}
+
+func MustMarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) {
+
+	retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMaskedFill(mask *Tensor, value *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MaskedFill(mask, value, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMaskedFill_(mask *Tensor, value *Scalar)() {
+
+	err := ts.MaskedFill_(mask, value)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustMaskedFillTensor(mask *Tensor, value *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MaskedFillTensor(mask, value, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMaskedFillTensor_(mask *Tensor, value *Tensor)() {
+
+	err := ts.MaskedFillTensor_(mask, value)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustMaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MaskedScatter(mask, source, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMaskedScatter_(mask *Tensor, source *Tensor)() {
+
+	err := ts.MaskedScatter_(mask, source)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustMaskedSelect(mask *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MaskedSelect(mask, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustMaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Tensor) {
+
+	retVal, err := MaskedSelectBackward(grad, input, mask)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MaskedSelectOut(out, mask, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatmul(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Matmul(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MatmulOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatrixExp(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MatrixExp(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MatrixExpBackward(grad, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatrixH(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MatrixH(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatrixPower(n int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MatrixPower(n, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.MatrixPowerOut(out, n, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
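+// Usage sketch: every Must* wrapper in this file calls the error-returning
+// variant of the same name and aborts via log.Fatal on failure, so call sites
+// need no error plumbing. A minimal example, assuming the package-level
+// MustOnes constructor and the Tensor Print helper, and gotch's convention
+// that a trailing del=true frees the receiver tensor after the call:
+//
+//	a := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
+//	b := ts.MustOnes([]int64{3, 2}, gotch.Float, gotch.CPU)
+//	c := a.MustMatmul(b, true) // del=true: a is freed once the matmul returns
+//	c.Print()                  // c is a 2x2 tensor, every element = 3
+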
+func(ts *Tensor) MustMatrixRank(symmetric bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixRank(symmetric, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatrixRankTol(tol float64, symmetric bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixRankTol(tol, symmetric, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMax(del bool)(retVal *Tensor) { + + retVal, err := ts.Max(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxDimMax(max, maxValues, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxOther(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxOther(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxPool1dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxPool2dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool2dWithIndicesOut(out 
*Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxPool2dWithIndicesOut(out, indices, kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxPool3dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MaxPool3dWithIndicesOut(out, indices, kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2d(indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dBackwardGradInput(gradInput, gradOutput, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustMaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dBackwardGradInput(gradInput, gradOutput, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaximum(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Maximum(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaximumOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMean(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Mean(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.MeanDim(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMedian(del bool)(retVal *Tensor) { + + retVal, err := ts.Median(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MedianDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MedianDimValues(values, indices, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMh(del bool)(retVal *Tensor) { + + retVal, err := ts.Mh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMin(del bool)(retVal *Tensor) { + + retVal, err := ts.Min(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MinDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.MinDimMin(min, minIndices, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMinOther(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MinOther(other, del) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MinOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinimum(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Minimum(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MinimumOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := MiopenBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustMiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := MiopenBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustMiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenRnn(input *Tensor, weight []Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) { + + retVal0, retVal1, retVal2, retVal3, retVal4, err := MiopenRnn(input, weight, weightStride0, hx, cx, mode, hiddenSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2, retVal3, retVal4 +} + +func(ts *Tensor) MustMish(del bool)(retVal *Tensor) { + + retVal, err := ts.Mish(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMish_()() { + + err := ts.Mish_() + if err 
!= nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MishBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMishOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MishOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnAdaptiveAvgPool2dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnLinear(weight *Tensor, bias *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnLinear(weight, bias, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor) { + + retVal, err := MkldnnLinearBackwardInput(inputSize, gradOutput, weight) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := MkldnnLinearBackwardWeights(gradOutput, input, weight, biasDefined) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) { + + retVal, err := MkldnnMaxPool2dBackward(gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnMaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) { + + retVal, err := MkldnnMaxPool3dBackward(gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
*Tensor) MustMkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnReorderConv3dWeight(padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMm(mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Mm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MmOut(out, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMode(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Mode(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.ModeValues(values, indices, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustMoveaxis(source []int64, destination []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Moveaxis(source, destination, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMoveaxisInt(source int64, destination int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MoveaxisInt(source, destination, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMovedim(source []int64, destination []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Movedim(source, destination, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMovedimInt(source int64, destination int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MovedimInt(source, destination, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLossBackwardGradInput(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMsort(del bool)(retVal *Tensor) { + + retVal, err := ts.Msort(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMsortOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MsortOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMt(del bool)(retVal *Tensor) { + + retVal, err := ts.Mt(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Mul(other, del) + if err != nil { log.Fatal(err) } 
+ + return retVal +} + +func(ts *Tensor) MustMul_(other *Tensor)() { + + err := ts.Mul_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MulOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMulScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.MulScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMulScalar_(other *Scalar)() { + + err := ts.MulScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultiMarginLossBackwardGradInput(gradInput, gradOutput, target, p, margin, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, isTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultinomial(numSamples int64, replacement bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Multinomial(numSamples, replacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MultinomialOut(out, numSamples, replacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiply(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Multiply(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiply_(other *Tensor)() { + + err := ts.Multiply_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MultiplyOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiplyScalar(other *Scalar, del bool)(retVal *Tensor) { + + 
retVal, err := ts.MultiplyScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiplyScalar_(other *Scalar)() { + + err := ts.MultiplyScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMv(vec *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Mv(vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MvOut(out, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMvlgamma(p int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Mvlgamma(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMvlgamma_(p int64)() { + + err := ts.Mvlgamma_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMvlgammaOut(out *Tensor, p int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MvlgammaOut(out, p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanToNum(nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.NanToNum(nan, posinf, neginf, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanToNum_(nan []float64, posinf []float64, neginf []float64)() { + + err := ts.NanToNum_(nan, posinf, neginf) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.NanToNumOut(out, nan, posinf, neginf, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Nanmean(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NanmeanOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanmedian(del bool)(retVal *Tensor) { + + retVal, err := ts.Nanmedian(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanmedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.NanmedianDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustNanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.NanmedianDimValues(values, indices, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustNanquantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) { + + retVal, err := ts.Nanquantile(q, dim, keepdim, interpolation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) { + + retVal, err := ts.NanquantileOut(out, q, dim, keepdim, interpolation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanquantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) { + + retVal, err := 
ts.NanquantileScalar(q, dim, keepdim, interpolation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) { + + retVal, err := ts.NanquantileScalarOut(out, q, dim, keepdim, interpolation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNansum(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Nansum(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNansumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NansumDimIntlist(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNansumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NansumIntlistOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrow(dim int64, start int64, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Narrow(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NarrowCopy(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NarrowCopyOut(out, dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrowTensor(dim int64, start *Tensor, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NarrowTensor(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := NativeBatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustNativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := NativeBatchNormOut(out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, training, momentum, eps) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustNativeChannelShuffle(groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NativeChannelShuffle(groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNativeDropout(input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := NativeDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func MustNativeDropoutBackward(gradOutput *Tensor, mask *Tensor, scale float64)(retVal *Tensor) { + + retVal, err := NativeDropoutBackward(gradOutput, mask, scale) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNativeGroupNorm(input *Tensor, 
weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := NativeGroupNorm(input, weight, bias, n, c, hxW, group, eps) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func MustNativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := NativeLayerNorm(input, normalizedShape, weight, bias, eps) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustNativeNorm(del bool)(retVal *Tensor) { + + retVal, err := ts.NativeNorm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NativeNormScalaroptDimDtype(p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNe(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Ne(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNe_(other *Scalar)() { + + err := ts.Ne_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.NeScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NeTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeTensor_(other *Tensor)() { + + err := ts.NeTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NeTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeg(del bool)(retVal *Tensor) { + + retVal, err := ts.Neg(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeg_()() { + + err := ts.Neg_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNegOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NegOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNegative(del bool)(retVal *Tensor) { + + retVal, err := ts.Negative(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNegative_()() { + + err := ts.Negative_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNegativeOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NegativeOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewEmptyStrided(size, stride, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind 
gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewOnes(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNextafter(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Nextafter(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNextafter_(other *Tensor)() { + + err := ts.Nextafter_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NextafterOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2dBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossNd(target *Tensor, 
weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossNd(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNonzero(del bool)(retVal *Tensor) { + + retVal, err := ts.Nonzero(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNonzeroOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NonzeroOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNorm(del bool)(retVal *Tensor) { + + retVal, err := ts.Norm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NormDtypeOut(out, p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor) { + + retVal, err := NormExceptDim(v, pow, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NormOut(out, p, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NormScalaroptDim(p, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NormScalaroptDimDtype(p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NormScalaroptDtype(p, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormal(out *Tensor, mean *Tensor, std float64)(retVal *Tensor) { + + retVal, err := Normal(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormal_(mean float64, std float64)() { + + err := ts.Normal_(mean, std) + if err != nil { log.Fatal(err) } + + return +} + +func MustNormalFloatFloatOut(out *Tensor, mean float64, std float64, size []int64)(retVal *Tensor) { + + retVal, err := NormalFloatFloatOut(out, mean, std, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalFloatTensorOut(out *Tensor, mean float64, std *Tensor)(retVal *Tensor) { + + retVal, err := NormalFloatTensorOut(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalTensorTensorOut(out *Tensor, mean *Tensor, std *Tensor)(retVal *Tensor) { + + retVal, err := NormalTensorTensorOut(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqual(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqual(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqual_(other *Scalar)() { + + err := ts.NotEqual_(other) + if err != nil { 
log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNotEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqualScalarOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqualTensor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqualTensor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqualTensor_(other *Tensor)() { + + err := ts.NotEqualTensor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNotEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqualTensorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNorm(keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNorm(keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNormDim(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNormDimOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNormOut(out, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNumpyT(del bool)(retVal *Tensor) { + + retVal, err := ts.NumpyT(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOneHot(numClasses int64, del bool)(retVal *Tensor) { + + retVal, err := ts.OneHot(numClasses, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Ones(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOnesLike(del bool)(retVal *Tensor) { + + retVal, err := ts.OnesLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustOnesOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := OnesOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrgqr(input2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Orgqr(input2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.OrgqrOut(out, input2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Ormqr(input2, input3, left, transpose, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) { + + retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOuter(vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Outer(vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOuterOut(out *Tensor, vec2 
*Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.OuterOut(out, vec2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustOutputNr(del bool)(retVal int64) {
+
+	retVal, err := ts.OutputNr(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustPadSequence(sequences []Tensor, batchFirst bool, paddingValue float64)(retVal *Tensor) {
+
+	retVal, err := PadSequence(sequences, batchFirst, paddingValue)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool)(retVal *Tensor) {
+
+	retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPdist(p float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Pdist(p, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPermute(dims []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Permute(dims, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPinMemory(device gotch.Device, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PinMemory(device, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPinverse(rcond float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Pinverse(rcond, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PixelShuffle(upscaleFactor, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPixelUnshuffle(downscaleFactor int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PixelUnshuffle(downscaleFactor, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPoisson(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Poisson(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal *Tensor) {
+
+	retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustPolar(abs *Tensor, angle *Tensor)(retVal *Tensor) {
+
+	retVal, err := Polar(abs, angle)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustPolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor) {
+
+	retVal, err := PolarOut(out, abs, angle)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPolygamma(n int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Polygamma(n, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPolygamma_(n int64)() {
+
+	err := ts.Polygamma_(n)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PolygammaOut(out, n, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPositive(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Positive(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPow(exponent *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Pow(exponent, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPow_(exponent *Scalar)() {
+
+	err := ts.Pow_(exponent)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustPowScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
+
+	retVal, err := PowScalar(selfScalar, exponent)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustPowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
+
+	retVal, err := PowScalarOut(out, selfScalar, exponent)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPowTensor_(exponent *Tensor)() {
+
+	err := ts.PowTensor_(exponent)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustPowTensorScalar(exponent *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PowTensorScalar(exponent, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPowTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PowTensorScalarOut(out, exponent, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPowTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.PowTensorTensorOut(out, exponent, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPrelu(weight *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Prelu(weight, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPreluBackward(gradOutput *Tensor, weight *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.PreluBackward(gradOutput, weight, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustProd(dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Prod(dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ProdDimInt(dim, keepdim, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ProdIntOut(out, dim, keepdim, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPut(index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Put(index, source, accumulate, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool)() {
+
+	err := ts.Put_(index, source, accumulate)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustQPerChannelAxis(del bool)(retVal int64) {
+
+	retVal, err := ts.QPerChannelAxis(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQPerChannelScales(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QPerChannelScales(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQPerChannelZeroPoints(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QPerChannelZeroPoints(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQScale(del bool)(retVal float64) {
+
+	retVal, err := ts.QScale(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQZeroPoint(del bool)(retVal int64) {
+
+	retVal, err := ts.QZeroPoint(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQr(some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Qr(some, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustQrQ(q *Tensor, r *Tensor, some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.QrQ(q, r, some, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustQuantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Quantile(q, dim, keepdim, interpolation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantileOut(out, q, dim, keepdim, interpolation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantileScalar(q, dim, keepdim, interpolation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantileScalarOut(out, q, dim, keepdim, interpolation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantizePerTensorDynamic(dtype gotch.DType, reduceRange bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantizePerTensorDynamic(dtype, reduceRange, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantizePerTensorTensorQparams(scale, zeroPoint, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor) {
+
+	retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
+
+	retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustQuantizedLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := QuantizedLstmCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustQuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantizedMaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
+
+	retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
+
+	retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRad2deg(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Rad2deg(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRad2deg_()() {
+
+	err := ts.Rad2deg_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRad2degOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Rad2degOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Rand(size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRandLike(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RandLike(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandOut(out *Tensor, size []int64)(retVal *Tensor) {
+
+	retVal, err := RandOut(out, size)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Randint(high, size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRandintLike(high int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RandintLike(high, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRandintLikeLowDtype(low int64, high int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RandintLikeLowDtype(low, high, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := RandintLow(low, high, size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandintLowOut(out *Tensor, low int64, high int64, size []int64)(retVal *Tensor) {
+
+	retVal, err := RandintLowOut(out, low, high, size)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor) {
+
+	retVal, err := RandintOut(out, high, size)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Randn(size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRandnLike(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RandnLike(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandnOut(out *Tensor, size []int64)(retVal *Tensor) {
+
+	retVal, err := RandnOut(out, size)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRandom_()() {
+
+	err := ts.Random_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRandomFrom_(from int64, to []int64)() {
+
+	err := ts.RandomFrom_(from, to)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRandomTo_(to int64)() {
+
+	err := ts.RandomTo_(to)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Randperm(n, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRandpermOut(out *Tensor, n int64)(retVal *Tensor) {
+
+	retVal, err := RandpermOut(out, n)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := Range(start, end, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) {
+
+	retVal, err := RangeOut(out, start, end)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := RangeStep(start, end, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRavel(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Ravel(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReal(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Real(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReciprocal(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Reciprocal(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReciprocal_()() {
+
+	err := ts.Reciprocal_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustReciprocalOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReciprocalOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad1d(padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad1d(padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad1dBackwardGradInput(gradInput, gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad1dOut(out, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad2d(padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad2d(padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad2dBackwardGradInput(gradInput, gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad2dOut(out, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad3d(padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad3d(padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad3dBackward(gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad3dBackwardGradInput(gradInput, gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReflectionPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReflectionPad3dOut(out, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRelu(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Relu(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRelu6(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Relu6(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRelu6_()() {
+
+	err := ts.Relu6_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRelu_()() {
+
+	err := ts.Relu_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRemainder(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Remainder(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRemainder_(other *Scalar)() {
+
+	err := ts.Remainder_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRemainderScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RemainderScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRemainderScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := RemainderScalarTensor(selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRemainderTensor(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RemainderTensor(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRemainderTensor_(other *Tensor)() {
+
+	err := ts.RemainderTensor_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRemainderTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RemainderTensorOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Renorm(p, dim, maxnorm, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar)() {
+
+	err := ts.Renorm_(p, dim, maxnorm)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RenormOut(out, p, dim, maxnorm, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRepeat(repeats []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Repeat(repeats, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRepeatInterleave(repeats *Tensor, outputSize []int64)(retVal *Tensor) {
+
+	retVal, err := RepeatInterleave(repeats, outputSize)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RepeatInterleaveSelfInt(repeats, dim, outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RepeatInterleaveSelfTensor(repeats, dim, outputSize, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad1d(padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad1d(padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad1dBackwardGradInput(gradInput, gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad1dOut(out, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad2d(padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad2d(padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad2dBackwardGradInput(gradInput, gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad2dOut(out, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad3d(padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad3d(padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad3dBackwardGradInput(gradInput, gradOutput, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReplicationPad3dOut(out, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRequiresGrad_(requiresGrad bool)() {
+
+	err := ts.RequiresGrad_(requiresGrad)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustReshape(shape []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Reshape(shape, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustReshapeAs(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ReshapeAs(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustResize_(size []int64)() {
+
+	err := ts.Resize_(size)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustResizeAs_(theTemplate *Tensor)() {
+
+	err := ts.ResizeAs_(theTemplate)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustResizeAsSparse_(theTemplate *Tensor)() {
+
+	err := ts.ResizeAsSparse_(theTemplate)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustResolveConj(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ResolveConj(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustResolveNeg(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ResolveNeg(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRetainsGrad(del bool)(retVal bool) {
+
+	retVal, err := ts.RetainsGrad(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRnnRelu(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := RnnRelu(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
+
+	retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := RnnReluData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustRnnTanh(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := RnnTanh(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
+
+	retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := RnnTanhData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Roll(shifts, dims, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRot90(k int64, dims []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Rot90(k, dims, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRound(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Round(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRound_()() {
+
+	err := ts.Round_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRoundDecimals(decimals int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RoundDecimals(decimals, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRoundDecimals_(decimals int64)() {
+
+	err := ts.RoundDecimals_(decimals)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRoundDecimalsOut(out *Tensor, decimals int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RoundDecimalsOut(out, decimals, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRoundOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RoundOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRowStack(tensors []Tensor)(retVal *Tensor) {
+
+	retVal, err := RowStack(tensors)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustRowStackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
+
+	retVal, err := RowStackOut(out, tensors)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRrelu(training bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Rrelu(training, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRrelu_(training bool)() {
+
+	err := ts.Rrelu_(training)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RreluWithNoise(noise, training, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool)() {
+
+	err := ts.RreluWithNoise_(noise, training)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RreluWithNoiseOut(out, noise, training, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRsqrt(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Rsqrt(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRsqrt_()() {
+
+	err := ts.Rsqrt_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustRsqrtOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RsqrtOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRsub(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Rsub(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustRsubScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.RsubScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := ScalarTensor(s, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Scatter(dim, index, src, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor)() {
+
+	err := ts.Scatter_(dim, index, src)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterAdd(dim, index, src, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor)() {
+
+	err := ts.ScatterAdd_(dim, index, src)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterAddOut(out, dim, index, src, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterReduce(dim, index, src, reduce, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string)() {
+
+	err := ts.ScatterReduce_(dim, index, src, reduce)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterReduceOut(out, dim, index, src, reduce, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterSrcOut(out, dim, index, src, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterValue(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterValue(dim, index, value, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterValue_(dim int64, index *Tensor, value *Scalar)() {
+
+	err := ts.ScatterValue_(dim, index, value)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterValueOut(out, dim, index, value, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterValueReduce(dim, index, value, reduce, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string)() {
+
+	err := ts.ScatterValueReduce_(dim, index, value, reduce)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.ScatterValueReduceOut(out, dim, index, value, reduce, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSearchsorted(sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Searchsorted(sortedSequence, outInt32, right, side, sorter, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor)(retVal *Tensor) {
+
+	retVal, err := SearchsortedScalar(sortedSequence, selfScalar, outInt32, right, side, sorter)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SearchsortedTensorOut(out, sortedSequence, outInt32, right, side, sorter, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor) {
+
+	retVal, err := SegmentReduce(data, reduce, lengths, indices, axis, unsafety, initial)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSelect(dim int64, index int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Select(dim, index, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor) {
+
+	retVal, err := SelectBackward(gradOutput, inputSizes, dim, index)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSelectScatter(src *Tensor, dim int64, index int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SelectScatter(src, dim, index, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSelu(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Selu(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSelu_()() {
+
+	err := ts.Selu_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSet_()() {
+
+	err := ts.Set_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSetRequiresGrad(r bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SetRequiresGrad(r, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSetSourceTensor_(source *Tensor)() {
+
+	err := ts.SetSourceTensor_(source)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSgn(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sgn(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSgn_()() {
+
+	err := ts.Sgn_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSgnOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SgnOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSigmoid(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sigmoid(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSigmoid_()() {
+
+	err := ts.Sigmoid_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func MustSigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
+
+	retVal, err := SigmoidBackward(gradOutput, output)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
+
+	retVal, err := SigmoidBackwardGradInput(gradInput, gradOutput, output)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SigmoidOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSign(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sign(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSign_()() {
+
+	err := ts.Sign_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSignOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SignOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSignbit(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Signbit(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSignbitOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SignbitOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSilu(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Silu(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSilu_()() {
+
+	err := ts.Silu_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SiluBackward(gradOutput, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SiluBackwardGradInput(gradInput, gradOutput, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSiluOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SiluOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSin(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sin(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSin_()() {
+
+	err := ts.Sin_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSinOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SinOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSinc(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sinc(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSinc_()() {
+
+	err := ts.Sinc_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSincOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SincOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSinh(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sinh(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSinh_()() {
+
+	err := ts.Sinh_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSinhOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SinhOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlice(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Slice(dim, start, end, step, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor) {
+
+	retVal, err := SliceBackward(gradOutput, inputSizes, dim, start, end, step)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSliceScatter(src *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SliceScatter(src, dim, start, end, step, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Slogdet(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSmm(mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Smm(mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SmoothL1Loss(target, reduction, beta, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, beta, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SmoothL1LossBackwardGradInput(gradInput, gradOutput, target, reduction, beta, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SmoothL1LossOut(out, target, reduction, beta, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftMarginLoss(target, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftMarginLossOut(out, target, reduction, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Softmax(dim, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftplus(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Softplus(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftplusBackwardGradInput(gradInput, gradOutput, beta, threshold, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftplusOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftplusOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftshrink(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Softshrink(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftshrinkBackwardGradInput(gradInput, gradOutput, lambd, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SoftshrinkOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSolve(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Solve(a, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustSolveSolution(solution *Tensor, lu *Tensor, a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.SolveSolution(solution, lu, a, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustSort(dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.Sort(dim, descending, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustSortStable(stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.SortStable(stable, dim, descending, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustSortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.SortValues(values, indices, dim, descending, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustSortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.SortValuesStable(values, indices, stable, dim, descending, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := SparseCooTensor(size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := SparseCooTensorIndices(indices, values, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := SparseCooTensorIndicesSize(indices, values, size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := SparseCsrTensor(crowIndices, colIndices, values, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+	retVal, err := SparseCsrTensorCrowColValueSize(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSparseDim(del bool)(retVal int64) {
+
+	retVal, err := ts.SparseDim(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSparseMask(mask *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SparseMask(mask, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64)() {
+
+	err := ts.SparseResize_(size, sparseDim, denseDim)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)() {
+
+	err := ts.SparseResizeAndClear_(size, sparseDim, denseDim)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSparseSampledAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SparseSampledAddmm(mat1, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSparseSampledAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SparseSampledAddmmOut(out, mat1, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialDigamma(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialDigamma(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialDigammaOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialDigammaOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialEntr(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialEntr(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialEntrOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialEntrOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErf(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErf(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfc(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfc(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfcOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfcOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfcx(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfcx(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfcxOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfcxOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfinv(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfinv(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialErfinvOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialErfinvOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialExp2(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialExp2(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialExp2Out(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialExp2Out(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialExpit(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialExpit(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialExpitOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialExpitOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialExpm1(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialExpm1(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialExpm1Out(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialExpm1Out(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialGammainc(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialGammainc(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialGammaincOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialGammaincOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialGammaincc(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialGammaincc(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialGammainccOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialGammainccOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialGammaln(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialGammaln(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialGammalnOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialGammalnOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI0(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI0(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI0Out(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI0Out(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI0e(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI0e(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI0eOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI0eOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI1(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI1(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI1Out(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI1Out(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI1e(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI1e(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialI1eOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialI1eOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLog1p(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLog1p(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLog1pOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLog1pOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLogSoftmax(dim, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLogit(eps []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLogit(eps, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLogitOut(out, eps, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLogsumexp(dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialLogsumexpOut(out, dim, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialMultigammaln(p int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialMultigammaln(p, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialMultigammalnOut(out *Tensor, p int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialMultigammalnOut(out, p, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialNdtr(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialNdtr(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialNdtrOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialNdtrOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialNdtri(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialNdtri(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialNdtriOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialNdtriOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialPolygamma(n int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialPolygamma(n, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialPolygammaOut(out, n, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialPsi(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialPsi(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialPsiOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialPsiOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialRound(decimals int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialRound(decimals, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialRoundOut(out *Tensor, decimals int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialRoundOut(out, decimals, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialSinc(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialSinc(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialSincOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialSincOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialSoftmax(dim, dtype, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlog1py(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlog1py(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlog1pyOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlog1pyOtherScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlog1pyOtherScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlog1pyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlog1pyOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := SpecialXlog1pySelfScalar(selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := SpecialXlog1pySelfScalarOut(out, selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlogy(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlogy(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlogyOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlogyOtherScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlogyOtherScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialXlogyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialXlogyOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := SpecialXlogySelfScalar(selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := SpecialXlogySelfScalarOut(out, selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialZeta(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialZeta(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialZetaOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialZetaOtherScalar(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialZetaOtherScalarOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSpecialZetaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SpecialZetaOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := SpecialZetaSelfScalar(selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustSpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+	retVal, err := SpecialZetaSelfScalarOut(out, selfScalar, other)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSqrt(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sqrt(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSqrt_()() {
+
+	err := ts.Sqrt_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSqrtOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SqrtOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSquare(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Square(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSquare_()() {
+
+	err := ts.Square_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSquareOut(out *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SquareOut(out, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSqueeze(del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Squeeze(del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSqueeze_()() {
+
+	err := ts.Squeeze_()
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSqueezeDim(dim int64, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SqueezeDim(dim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSqueezeDim_(dim int64)() {
+
+	err := ts.SqueezeDim_(dim)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sspaddmm(mat1, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SspaddmmOut(out, mat1, mat2, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustStack(tensors []Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := Stack(tensors, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func MustStackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
+
+	retVal, err := StackOut(out, tensors, dim)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustStd(unbiased bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Std(unbiased, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustStdCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.StdCorrection(dim, correction, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustStdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.StdCorrectionOut(out, dim, correction, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustStdDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.StdDim(dim, unbiased, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustStdMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.StdMean(unbiased, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustStdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.StdMeanCorrection(dim, correction, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustStdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
+
+	retVal0, retVal1, err := ts.StdMeanDim(dim, unbiased, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal0, retVal1
+}
+
+func(ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustStft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, returnComplex, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSub(other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.Sub(other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSub_(other *Tensor)() {
+
+	err := ts.Sub_(other)
+	if err != nil { log.Fatal(err) }
+
+	return
+}
+
+func(ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SubOut(out, other, del)
+	if err != nil { log.Fatal(err) }
+
+	return retVal
+}
+
+func(ts *Tensor) MustSubScalar(other *Scalar, del bool)(retVal *Tensor) {
+
+	retVal, err := ts.SubScalar(other, del)
+	if err != nil {
log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubScalar_(other *Scalar)() { + + err := ts.SubScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSubtract(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Subtract(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubtract_(other *Tensor)() { + + err := ts.Subtract_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SubtractOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubtractScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.SubtractScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubtractScalar_(other *Scalar)() { + + err := ts.SubtractScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSum(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Sum(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.SumDimIntlist(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.SumIntlistOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSumToSize(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SumToSize(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSvd(some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts.Svd(some, computeUv, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustSvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts.SvdU(u, s, v, some, computeUv, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustSwapaxes(axis0 int64, axis1 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Swapaxes(axis0, axis1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSwapaxes_(axis0 int64, axis1 int64)() { + + err := ts.Swapaxes_(axis0, axis1) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSwapdims(dim0 int64, dim1 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Swapdims(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSwapdims_(dim0 int64, dim1 int64)() { + + err := ts.Swapdims_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSymeig(eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Symeig(eigenvectors, upper, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustSymeigE(e *Tensor, v *Tensor, eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.SymeigE(e, v, eigenvectors, upper, del) + if err != nil { log.Fatal(err) } 
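// Every Must* method in this generated file follows the same mechanical
// pattern seen above: delegate to the error-returning method of the same
// name, abort via log.Fatal on any error, and hand back the bare tensor(s).
// The trailing `del bool` asks the callee to drop (free) the receiver once
// the op completes. A minimal caller-side sketch, assuming a valid *Tensor
// `x` (the variable names here are hypothetical, not part of this patch):
//
//	y := x.MustSqrt(true)                  // computes sqrt, frees x
//	ret0, ret1 := y.MustStdMean(true, true) // two-result variant, frees y
//	defer ret0.MustDrop()
//	defer ret1.MustDrop()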
+ + return retVal0, retVal1 +} + +func(ts *Tensor) MustT(del bool)(retVal *Tensor) { + + retVal, err := ts.T(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustT_()() { + + err := ts.T_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTake(index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Take(index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTakeAlongDim(indices *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TakeAlongDim(indices, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TakeAlongDimOut(out, indices, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TakeOut(out, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTan(del bool)(retVal *Tensor) { + + retVal, err := ts.Tan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTan_()() { + + err := ts.Tan_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTanOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TanOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTanh(del bool)(retVal *Tensor) { + + retVal, err := ts.Tanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTanh_()() { + + err := ts.Tanh_() + if err != nil { log.Fatal(err) } + + return +} + +func MustTanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) { + + retVal, err := TanhBackward(gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) { + + retVal, err := TanhBackwardGradInput(gradInput, gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTanhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TensordotOut(out, other, dimsSelf, dimsOther, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Threshold(threshold, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar)() { + + err := ts.Threshold_(threshold, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) 
{ + + retVal, err := ts.ThresholdBackwardGradInput(gradInput, gradOutput, threshold, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ThresholdOut(out, threshold, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTile(dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Tile(dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTo(device gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.To(device, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToDense(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.ToDense(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustToDenseBackward(grad *Tensor, input *Tensor)(retVal *Tensor) { + + retVal, err := ToDenseBackward(grad, input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.ToDevice(device, dtype, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.ToDtype(dtype, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.ToDtypeLayout(optionsKind, optionsDevice, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToMkldnn(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.ToMkldnn(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor) { + + retVal, err := ToMkldnnBackward(grad, input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToOther(other *Tensor, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.ToOther(other, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToSparse(del bool)(retVal *Tensor) { + + retVal, err := ts.ToSparse(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ToSparseSparseDim(sparseDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTopk(k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.Topk(k, dim, largest, sorted, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustTopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.TopkValues(values, indices, k, dim, largest, sorted, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustTotype(scalarType gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Totype(scalarType, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustTrace(del bool)(retVal *Tensor) { + + retVal, err := ts.Trace(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor) { + + retVal, err := TraceBackward(grad, sizes) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Transpose(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTranspose_(dim0 int64, dim1 int64)() { + + err := ts.Transpose_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func MustTrapezoid(y *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := Trapezoid(y, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := TrapezoidX(y, x, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTrapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := Trapz(y, x, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTrapzDx(y *Tensor, dx float64, dim int64)(retVal *Tensor) { + + retVal, err := TrapzDx(y, dx, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.TriangularSolve(a, upper, transpose, unitriangular, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustTriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.TriangularSolveX(x, m, a, upper, transpose, unitriangular, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustTril(diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Tril(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTril_(diagonal int64)() { + + err := ts.Tril_(diagonal) + if err != nil { log.Fatal(err) } + + return +} + +func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TrilOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal *Tensor) { + + retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTriu(diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Triu(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTriu_(diagonal int64)() { + + err := ts.Triu_(diagonal) + if err != nil { log.Fatal(err) } + + return +} + +func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + 
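// Package-level factories such as MustTrilIndices/MustTriuIndices here (and
// MustZeros near the end of this file) take no receiver; instead of
// libtorch's TensorOptions they accept the pair
// (optionsKind gotch.DType, optionsDevice gotch.Device). A hedged sketch,
// assuming the usual gotch constants (gotch.Int64, gotch.CPU) are in scope:
//
//	idx := ts.MustTriuIndices(3, 3, 0, gotch.Int64, gotch.CPU)
//	defer idx.MustDrop()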
return retVal +} + +func(ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TriuOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrueDivide(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TrueDivide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrueDivide_(other *Tensor)() { + + err := ts.TrueDivide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TrueDivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrueDivideScalar(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.TrueDivideScalar(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrueDivideScalar_(other *Scalar)() { + + err := ts.TrueDivideScalar_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTrunc(del bool)(retVal *Tensor) { + + retVal, err := ts.Trunc(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrunc_()() { + + err := ts.Trunc_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTruncOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TruncOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTypeAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TypeAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUnflatten(dim int64, sizes []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Unflatten(dim, sizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Unfold(dimension, size, step, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor) { + + retVal, err := UnfoldBackward(gradIn, inputSizes, dim, size, step) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUniform_(from float64, to float64)() { + + err := ts.Uniform_(from, to) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustUniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts.UniqueConsecutive(returnInverse, returnCounts, dim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustUniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts.UniqueDim(dim, sorted, returnInverse, returnCounts, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustUniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) { + + retVal0, retVal1, retVal2, err := ts.UniqueDimConsecutive(dim, returnInverse, returnCounts, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1, retVal2 +} + +func(ts *Tensor) MustUnsqueeze(dim int64, del bool)(retVal *Tensor) { + + retVal, err := 
ts.Unsqueeze(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUnsqueeze_(dim int64)() { + + err := ts.Unsqueeze_(dim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBicubic2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBilinear2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize 
[]int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleLinear1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest1d(outputSize, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize 
[]int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleTrilinear3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool)(retVal *Tensor) { + + retVal, err := ValueSelectingReductionBackward(grad, dim, indices, sizes, keepdim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustValues(del bool)(retVal *Tensor) { + + retVal, err := ts.Values(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustVander(x *Tensor, n []int64, increasing bool)(retVal *Tensor) { + + retVal, err := Vander(x, n, increasing) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVar(unbiased bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Var(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVarCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.VarCorrection(dim, correction, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.VarCorrectionOut(out, dim, correction, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVarDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.VarDim(dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
*Tensor) MustVarMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.VarMean(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustVarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.VarMeanCorrection(dim, correction, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustVarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) { + + retVal0, retVal1, err := ts.VarMeanDim(dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal0, retVal1 +} + +func(ts *Tensor) MustVarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVdot(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Vdot(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.VdotOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustView(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.View(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ViewAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewAsComplex(del bool)(retVal *Tensor) { + + retVal, err := ts.ViewAsComplex(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewAsReal(del bool)(retVal *Tensor) { + + retVal, err := ts.ViewAsReal(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewDtype(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.ViewDtype(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustVstack(tensors []Tensor)(retVal *Tensor) { + + retVal, err := Vstack(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustVstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := VstackOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustWhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *Tensor) { + + retVal, err := WhereScalar(condition, selfScalar, other) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustWhereScalarother(condition *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.WhereScalarother(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustWhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) { + + retVal, err := WhereScalarself(condition, selfScalar, other) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustWhereSelf(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.WhereSelf(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustXlogy(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Xlogy(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustXlogy_(other *Tensor)() {
+
+ err := ts.Xlogy_(other)
+ if err != nil { log.Fatal(err) }
+
+ return
+}
+
+func(ts *Tensor) MustXlogyOutscalarOther(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
+
+ retVal, err := ts.XlogyOutscalarOther(out, other, del)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func MustXlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+ retVal, err := XlogyOutscalarSelf(out, selfScalar, other)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func(ts *Tensor) MustXlogyOuttensor(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
+
+ retVal, err := ts.XlogyOuttensor(out, other, del)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func(ts *Tensor) MustXlogyScalarOther(other *Scalar, del bool)(retVal *Tensor) {
+
+ retVal, err := ts.XlogyScalarOther(other, del)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func(ts *Tensor) MustXlogyScalarOther_(other *Scalar)() {
+
+ err := ts.XlogyScalarOther_(other)
+ if err != nil { log.Fatal(err) }
+
+ return
+}
+
+func MustXlogyScalarSelf(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
+
+ retVal, err := XlogyScalarSelf(selfScalar, other)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func(ts *Tensor) MustZero_()() {
+
+ err := ts.Zero_()
+ if err != nil { log.Fatal(err) }
+
+ return
+}
+
+func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
+
+ retVal, err := Zeros(size, optionsKind, optionsDevice)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func(ts *Tensor) MustZerosLike(del bool)(retVal *Tensor) {
+
+ retVal, err := ts.ZerosLike(del)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+
+func MustZerosOut(out *Tensor, size []int64)(retVal *Tensor) {
+
+ retVal, err := ZerosOut(out, size)
+ if err != nil { log.Fatal(err) }
+
+ return retVal
+}
+// End of implementing Tensor =================================
diff --git a/ts/tensor-generated.go b/ts/tensor-generated.go
index d3cad73..52c4b1c 100644
--- a/ts/tensor-generated.go
+++ b/ts/tensor-generated.go
@@ -5,28741 +5,26076 @@ package ts
 // #include "stdlib.h"
 import "C"
 
-import (
-	"unsafe"
+import(
+	"unsafe"
 
-	"github.com/sugarme/gotch"
-	lib "github.com/sugarme/gotch/libtch"
+	"github.com/sugarme/gotch"
+	lib "github.com/sugarme/gotch/libtch"
 )
 
-func (ts *Tensor) __And_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	lib.Atg__And_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) __AndTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.Atg__AndTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) __Iand_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) __IandTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.Atg__IandTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) __Ilshift_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.Atg__Ilshift_(ptr,
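// The `-` hunks from here on appear to remove the old gofmt-formatted
// libtorch-1.10 bindings from ts/tensor-generated.go; the regenerated 1.11
// bodies are added by later hunks of this patch. The removed code shows the
// raw binding idiom: allocate a result slot, call the lib.Atg* C wrapper,
// check TorchErr(), then either swap ts.ctensor in place (for `_`-suffixed
// in-place ops, as in __And_ above) or wrap the new handle in a *Tensor.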
ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __IlshiftTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__IlshiftTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Ior_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __IorTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__IorTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Irshift_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __IrshiftTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__IrshiftTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Ixor_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __IxorTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__IxorTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Lshift_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __LshiftTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__LshiftTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Or_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Or_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __OrTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__OrTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Rshift_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __RshiftTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__RshiftTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __Xor_(other 
*Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) __XorTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__XorTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AddBatchDim(batchDim int64, level int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddBatchDim(ptr, ts.ctensor, batchDim, level) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AddRelu(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddRelu(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AddRelu_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddRelu_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _AddReluOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddReluOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AddReluScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddReluScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _AddReluScalar_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddReluScalar_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _Aminmax(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_Aminmax(ctensorPtr0, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _AminmaxDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.Atg_AminmaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AmpUpdateScale_(ptr, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_BaddbmmMkl_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _CastByte(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastChar(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastDouble(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - 
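// cgo marshalling note: the C ABI exposed by libtch takes no Go bools, so
// each bool argument is lowered to an int32 flag (0/1) immediately before
// the lib.Atg* call, exactly as cnonBlocking is just above. The idiom,
// extracted (with a hypothetical flag name):
//
//	cflag := int32(0)
//	if flag {
//		cflag = int32(1)
//	}
//	// cflag, not the Go bool, is what crosses into C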
lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastFloat(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastHalf(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastInt(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastLong(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CastShort(nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _Cat(tensors []Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.Atg_Cat(ptr, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.Atg_CatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) 
_CholeskySolveHelper(a *Tensor, upper bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { - cupper = int32(1) - } - lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Coalesce(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Coalesce(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Coalesced_(coalesced bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ccoalesced := int32(0) - if coalesced { - ccoalesced = int32(1) - } - lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func _ComputeLinearCombination(input *Tensor, coefficients *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ComputeLinearCombination(ptr, input.ctensor, coefficients.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ComputeLinearCombinationOut(ptr, out.ctensor, input.ctensor, coefficients.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Conj(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Conj(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ConjPhysical(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ConjPhysical(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ConvDepthwise2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ConvDepthwise2dBackward(gradInput *Tensor, gradWeight *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_ConvDepthwise2dBackward(ctensorPtr0, gradInput.ctensor, 
gradWeight.ctensor, gradOutput.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ConvDepthwise2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - coutInt32 := int32(0) - if outInt32 { - coutInt32 = int32(1) - } - lib.Atg_ConvertIndicesFromCooToCsr(ptr, ts.ctensor, size, coutInt32) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - coutInt32 := int32(0) - if outInt32 { - coutInt32 = int32(1) - } - lib.Atg_ConvertIndicesFromCooToCsrOut(ptr, out.ctensor, ts.ctensor, size, coutInt32) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { - ctransposed = int32(1) - } - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - ccudnnEnabled := int32(0) - if cudnnEnabled { - ccudnnEnabled = int32(1) - } - callowTf32 := int32(0) - if allowTf32 { - callowTf32 = int32(1) - } - lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { - ctransposed = int32(1) - } - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - 
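// Note on the c-prefixed locals above: cgo cannot pass a Go bool across
// the C ABI, so the generator expands every bool parameter into an int32
// plus an if-block, as it does here for ctransposed, cbenchmark,
// cdeterministic and (just below) ccudnnEnabled. A minimal sketch of the
// same conversion factored into a helper -- hypothetical, since the
// generated file deliberately inlines it at every call site:
//
//	func cbool(b bool) int32 {
//		if b {
//			return 1
//		}
//		return 0
//	}
//
// so that each block would collapse to, e.g., cbenchmark := cbool(benchmark).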
ccudnnEnabled := int32(0) - if cudnnEnabled { - ccudnnEnabled = int32(1) - } - lib.Atg_ConvolutionDeprecated(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ConvolutionMode(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { - ctransposed = int32(1) - } - lib.Atg_ConvolutionNogroup(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _CopyFromAndResize(dst *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_CopyFromAndResize(ptr, ts.ctensor, dst.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - czeroInfinity := int32(0) - if zeroInfinity { - czeroInfinity = int32(1) - } - lib.Atg_CtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, czeroInfinity) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func _CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - czeroInfinity := int32(0) - if zeroInfinity { - czeroInfinity = int32(1) - } - 
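// The call just below passes every []int64 argument as a (data, length)
// pair -- inputLengths, len(inputLengths) and targetLengths,
// len(targetLengths) -- because the C shim receives a raw pointer plus an
// explicit element count rather than a Go slice header. A sketch of what
// such a pair looks like on the libtch side (assumed, simplified shape;
// atg_example is a made-up name, and the empty-slice case is ignored):
//
//	func AtgExample(vals []int64, n int) {
//		cvals := (*C.int64_t)(unsafe.Pointer(&vals[0])) // data pointer
//		C.atg_example(cvals, C.int(n))                  // explicit length
//	}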
lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - czeroInfinity := int32(0) - if zeroInfinity { - czeroInfinity = int32(1) - } - lib.Atg_CudnnCtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, cdeterministic, czeroInfinity) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CudnnRnn(input *Tensor, weight []Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0))) - - var cweight []lib.Ctensor - for _, t := range weight { - cweight = append(cweight, t.ctensor) - } - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - ctrain := int32(0) - if train { - ctrain = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - lib.Atg_CudnnRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, len(batchSizes), dropoutState.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, retVal3, retVal4, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - retVal4 = &Tensor{ctensor: *ctensorPtr4} - - 
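// The ctensorPtr0..ctensorPtr4 slots above form one contiguous block:
// only the first is allocated, and each later pointer is derived by
// uintptr arithmetic at stride unsafe.Sizeof(ctensorPtr0). The C function
// then writes its five result handles into consecutive slots starting at
// the first pointer. The same layout written with an explicit array -- a
// sketch, not what the generator emits:
//
//	var slots [5]lib.Ctensor
//	base := (*lib.Ctensor)(unsafe.Pointer(&slots[0]))
//	// pass base where ctensorPtr0 is passed above; after the call:
//	retVal0 = &Tensor{ctensor: slots[0]} // likewise slots[1..4]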
return retVal0, retVal1, retVal2, retVal3, retVal4, err -} - -func _CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cweightArr []lib.Ctensor - for _, t := range weightArr { - cweightArr = append(cweightArr, t.ctensor) - } - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CufftGetPlanCacheMaxSize(deviceIndex int64) (retVal int64, err error) { - - retVal = lib.Atg_CufftGetPlanCacheMaxSize(deviceIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func _CufftGetPlanCacheSize(deviceIndex int64) (retVal int64, err error) { - - retVal = lib.Atg_CufftGetPlanCacheSize(deviceIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) _DebugHasInternalOverlap(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.Atg_DebugHasInternalOverlap(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) _DetLuBasedHelper(del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_DetLuBasedHelper(ctensorPtr0, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) _DetLuBasedHelperBackwardHelper(detGrad *Tensor, det *Tensor, lu *Tensor, pivs *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_DetLuBasedHelperBackwardHelper(ptr, detGrad.ctensor, det.ctensor, ts.ctensor, lu.ctensor, pivs.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _DimArange(like *Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_DimArange(ptr, like.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Dimi(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.Atg_Dimi(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) _Dimv(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.Atg_Dimv(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err 
-} - -func _DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { - cscaleGradByFreq = int32(1) - } - csparse := int32(0) - if sparse { - csparse = int32(1) - } - cincludeLastOffset := int32(0) - if includeLastOffset { - cincludeLastOffset = int32(1) - } - lib.Atg_EmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, retVal3, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - - return retVal0, retVal1, retVal2, retVal3, err -} - -func _EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { - cscaleGradByFreq = int32(1) - } - csparse := int32(0) - if sparse { - csparse = int32(1) - } - lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, paddingIdx) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { - cscaleGradByFreq = int32(1) - } - lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 
*Tensor, retVal3 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { - cscaleGradByFreq = int32(1) - } - csparse := int32(0) - if sparse { - csparse = int32(1) - } - cincludeLastOffset := int32(0) - if includeLastOffset { - cincludeLastOffset = int32(1) - } - lib.Atg_EmbeddingBagForwardOnly(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, retVal3, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - - return retVal0, retVal1, retVal2, retVal3, err -} - -func _EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { - cscaleGradByFreq = int32(1) - } - lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EmptyAffineQuantized(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, len(size), scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EuclideanDist(x1 *Tensor, x2 *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EuclideanDist(ptr, x1.ctensor, 
x2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_FakeQuantizeLearnablePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_FakeQuantizeLearnablePerChannelAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) _FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_FakeQuantizeLearnablePerTensorAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_FakeQuantizeLearnablePerTensorAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := 
(*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparams(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _FftC2c(dim []int64, normalization int64, forward bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cforward := int32(0) - if forward { - cforward = int32(1) - } - lib.Atg_FftC2c(ptr, ts.ctensor, dim, len(dim), normalization, cforward) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cforward := int32(0) - if forward { - cforward = int32(1) - } - lib.Atg_FftC2cOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, cforward) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_FftC2r(ptr, ts.ctensor, dim, len(dim), normalization, lastDimSize) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_FftC2rOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, lastDimSize) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FftR2c(dim []int64, normalization int64, onesided bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - conesided := int32(0) - if onesided { - conesided = int32(1) - } - lib.Atg_FftR2c(ptr, ts.ctensor, dim, len(dim), normalization, conesided) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - conesided := int32(0) - if onesided { - conesided = int32(1) - } - lib.Atg_FftR2cOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, conesided) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _FusedDropout(p float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + 
unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_FusedDropout(ctensorPtr0, ts.ctensor, p) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cperRowFakeQuant := int32(0) - if perRowFakeQuant { - cperRowFakeQuant = int32(1) - } - csymmetricQuant := int32(0) - if symmetricQuant { - csymmetricQuant = int32(1) - } - lib.Atg_FusedMovingAvgObsFqHelper(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _FwPrimal(level int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_FwPrimal(ptr, ts.ctensor, level) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - lib.Atg_GridSampler2dCpuFallback(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - lib.Atg_GridSampler2dCpuFallbackBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) 
_HasCompatibleShallowCopyType(from *Tensor, del bool) (retVal bool, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.Atg_HasCompatibleShallowCopyType(ts.ctensor, from.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) _IndexCopy_(dim int64, index *Tensor, source *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_IndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _Indices(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Indices(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _InverseHelper(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_InverseHelper(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _LinalgInvOutHelper_(infosLu *Tensor, infosGetri *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_LinalgInvOutHelper_(ptr, ts.ctensor, infosLu.ctensor, infosGetri.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _LinalgQrHelper(mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_LinalgQrHelper(ctensorPtr0, ts.ctensor, mode) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { - chalfToFloat = int32(1) - } - lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_LogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _LogSoftmaxOut(out *Tensor, dim int64, halfToFloat 
bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { - chalfToFloat = int32(1) - } - lib.Atg_LogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Logcumsumexp(dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Logcumsumexp(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_LogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _LuWithInfo(pivot bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - cpivot := int32(0) - if pivot { - cpivot = int32(1) - } - ccheckErrors := int32(0) - if checkErrors { - ccheckErrors = int32(1) - } - lib.Atg_LuWithInfo(ctensorPtr0, ts.ctensor, cpivot, ccheckErrors) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func _MakeDual(primal *Tensor, tangent *Tensor, level int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MakeDual(ptr, primal.ctensor, tangent.ctensor, level) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _MaskedScale(mask *Tensor, scale float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale) - if err = TorchErr(); err != nil { - return retVal, 
err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _MkldnnReshape(shape []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, len(shape)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _NegView(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NegView(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _NnpackAvailable() (retVal bool, err error) { - - retVal = lib.Atg_NnpackAvailable() - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func _NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NnpackSpatialConvolutionBackwardInput(ptr, input.ctensor, gradOutput.ctensor, weight.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int64, gradOutput *Tensor, padding []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NnpackSpatialConvolutionBackwardWeight(ptr, input.ctensor, weightsize, len(weightsize), gradOutput.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Nnz(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.Atg_Nnz(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func _PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - 
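// Error-handling convention, visible after every lib.Atg* call in this
// file: the C shim returns nothing on failure; it catches the C++
// exception and records the message, and TorchErr() retrieves (and
// clears) that record afterwards. Caller-side sketch using the function
// below -- input and lengths are assumed, pre-built tensors:
//
//	packed, batchSizes, err := _PackPaddedSequence(input, lengths, true)
//	if err != nil {
//		return err // message originates from the C++ exception
//	}
//	defer packed.MustDrop()
//	defer batchSizes.MustDrop()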
lib.Atg_PackPaddedSequence(ctensorPtr0, input.ctensor, lengths.ctensor, cbatchFirst) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func _PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, len(inputSize), batchSizes.ctensor, cbatchFirst) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - lib.Atg_PadPackedSequence(ctensorPtr0, data.ctensor, batchSizes.ctensor, cbatchFirst, paddingValue.cscalar, totalLength) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _PinMemory(device gotch.Device, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_PinMemory(ptr, ts.ctensor, device.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_RemoveBatchDim(ptr, ts.ctensor, level, batchSize, outDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ReshapeAlias(size []int64, stride []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ReshapeAlias(ptr, ts.ctensor, size, len(size), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType) (retVal0 *Tensor, retVal1 
*Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_RowwisePrune(ctensorPtr0, weight.ctensor, mask.ctensor, compressedIndicesDtype.CInt()) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _SWhere(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SWhere(ptr, condition.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SampleDirichlet(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SampleDirichlet(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SaturateWeightToFp16(weight *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SaturateWeightToFp16(ptr, weight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, axis int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SegmentReduceBackward(ptr, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, axis) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ShapeAsTensor(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ShapeAsTensor(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_SobolEngineDraw(ctensorPtr0, quasi.ctensor, n, sobolstate.ctensor, dimension, numGenerated, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _SobolEngineInitializeState_(dimension int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension) - if err = TorchErr(); err != nil { - return err - } - 
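// Methods whose names end in an underscore (_SobolEngineFf_ above,
// _SobolEngineScramble_ below, _IndexCopy_ and _Coalesced_ earlier)
// follow libtorch's in-place convention: they return only an error and
// overwrite the receiver's handle, as the assignment just below does.
// Caller-side sketch (ts and dim assumed to exist):
//
//	if err := ts._SobolEngineInitializeState_(dim); err != nil {
//		return err
//	}
//	// ts now holds the re-initialized state; there is no new
//	// tensor for the caller to MustDrop.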
ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _SobolEngineScramble_(ltm *Tensor, dimension int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) _Softmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { - chalfToFloat = int32(1) - } - lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SoftmaxBackwardDataOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { - chalfToFloat = int32(1) - } - lib.Atg_SoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SolveHelper(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_SolveHelper(ctensorPtr0, ts.ctensor, a.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _SparseAddmm(sparse *Tensor, dense *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseAddmm(ptr, ts.ctensor, sparse.ctensor, dense.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: 
*ptr} - - return retVal, err -} - -func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, len(size), indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseLogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { - chalfToFloat = int32(1) - } - lib.Atg_SparseLogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseLogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseLogSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseMaskHelper(t *Tensor, maskIndices *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseMaskHelper(ptr, t.ctensor, maskIndices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSoftmax(dim int64, halfToFloat bool, del 
bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { - chalfToFloat = int32(1) - } - lib.Atg_SparseSoftmax(ptr, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSparseMatmul(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSparseMatmul(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSum(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSum(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSumBackward(grad *Tensor, dim []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, len(dim)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSumDim(dim []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSumDim(ptr, ts.ctensor, dim, len(dim)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSumDimDtype(ptr, ts.ctensor, dim, len(dim), dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SparseSumDtype(dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSumDtype(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _Stack(tensors []Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var 
ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.Atg_Stack(ptr, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _StackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.Atg_StackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _StandardGamma(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_StandardGamma(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _StandardGammaGrad(output *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _SvdHelper(some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - csome := int32(0) - if some { - csome = int32(1) - } - ccomputeUv := int32(0) - if computeUv { - ccomputeUv = int32(1) - } - lib.Atg_SvdHelper(ctensorPtr0, ts.ctensor, csome, ccomputeUv) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) _SymeigHelper(eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ceigenvectors := int32(0) - if eigenvectors { - ceigenvectors = int32(1) - } - cupper := int32(0) - if upper { - cupper = int32(1) - } - lib.Atg_SymeigHelper(ctensorPtr0, ts.ctensor, ceigenvectors, cupper) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func _TestAmbiguousDefaults(dummy *Tensor, a int64, b int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_TestAmbiguousDefaults(ptr, dummy.ctensor, a, b) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
lib.Atg_TestAmbiguousDefaultsB(ptr, dummy.ctensor, a, b) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _TestOptionalFilledIntlist(values *Tensor, addends []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_TestOptionalFilledIntlist(ptr, values.ctensor, addends, len(addends)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _TestOptionalIntlist(values *Tensor, addends []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_TestOptionalIntlist(ptr, values.ctensor, addends, len(addends)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _TestSerializationSubcmul(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_TestSerializationSubcmul(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _TestStringDefault(dummy *Tensor, a string, b string) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_TestStringDefault(ptr, dummy.ctensor, a, b) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - lib.Atg_ToCopy(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, len(expand1), expand2, len(expand2), expand3, len(expand3), sumdim, len(sumdim), unrollDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Unique(sorted bool, returnInverse bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - csorted := int32(0) - if sorted { - csorted = int32(1) - } - creturnInverse := int32(0) - if returnInverse { - creturnInverse = int32(1) - } - lib.Atg_Unique(ctensorPtr0, ts.ctensor, csorted, creturnInverse) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - csorted := int32(0) - if sorted { - csorted = int32(1) - } - creturnInverse := int32(0) - if returnInverse { - creturnInverse = int32(1) - } - creturnCounts := int32(0) - if returnCounts { - creturnCounts = int32(1) - } - lib.Atg_Unique2(ctensorPtr0, ts.ctensor, csorted, creturnInverse, creturnCounts) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func _UnpackDual(dual *Tensor, level int64) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_UnpackDual(ctensorPtr0, dual.ctensor, level) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) _UnsafeView(size []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_UnsafeView(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64) (retVal bool, err error) { - - retVal = lib.Atg_UseCudnnCtcLoss(logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func _UseCudnnRnnFlattenWeight() (retVal bool, err error) { - - retVal = lib.Atg_UseCudnnRnnFlattenWeight() - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) _Values(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Values(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) _Version(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.Atg_Version(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func _WeightNorm(v *Tensor, g *Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func _WeightNormCudaInterface(v *Tensor, g *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_WeightNormCudaInterface(ctensorPtr0, v.ctensor, g.ctensor, dim) - if err = TorchErr(); err != nil { - return 
retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func _WeightNormCudaInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_WeightNormCudaInterfaceBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func _WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.Atg_WeightNormDifferentiableBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Abs(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbs(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Abs_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbs_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AbsOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Absolute(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbsolute(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Absolute_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbsolute_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AbsoluteOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbsoluteOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Acos(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcos(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} 
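// --- Illustrative usage (editorial sketch, not part of the generated diff) ---
// Every wrapper in this hunk follows one calling convention: allocate a result
// slot, call the corresponding lib.Atg* shim, check TorchErr(), and wrap the
// returned ctensor in a *Tensor. The trailing `del` flag defers MustDrop() on
// the receiver, letting a call chain release intermediates as it goes. The
// helper below is a hypothetical sketch assumed to live inside this package,
// using only the Abs and Acos wrappers shown above; the caller remains
// responsible for eventually dropping the returned tensor (e.g. via MustDrop).
func exampleAbsAcos(x *Tensor) (*Tensor, error) {
	absX, err := x.Abs(false) // del=false: leave x alive for the caller to manage
	if err != nil {
		return nil, err
	}
	// del=true: absX is dropped as soon as its acos has been produced
	return absX.Acos(true)
}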
- -func (ts *Tensor) Acos_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcos_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AcosOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Acosh(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcosh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Acosh_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcosh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AcoshOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcoshOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3dBackward(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor, err 
error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveMaxPool1d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgAdaptiveMaxPool1d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) AdaptiveMaxPool2d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgAdaptiveMaxPool2d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) AdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgAdaptiveMaxPool2dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) AdaptiveMaxPool3d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgAdaptiveMaxPool3d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - 
return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) AdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgAdaptiveMaxPool3dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Add(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdd(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Add_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AddScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AddScalar_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddScalar_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Addbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, 
batch2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addbmm_(batch1 *Tensor, batch2 *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addcdiv_(tensor1 *Tensor, tensor2 *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addcmul(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addcmul_(tensor1 *Tensor, tensor2 *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addmm_(mat1 *Tensor, mat2 *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmm_(ptr, ts.ctensor, 
mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addmv(mat *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addmv_(mat *Tensor, vec *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Addr_(vec1 *Tensor, vec2 *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func AffineGridGenerator(theta *Tensor, size []int64, alignCorners bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, len(size), calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func AffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, len(size), calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Alias(del bool) (retVal *Tensor, err error) 
{ - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAlias(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AlignAs(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) All(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAll(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AllAllOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAllAllOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AllDim(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAllDim(ptr, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AllOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Allclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal bool, err error) { - if del { - defer ts.MustDrop() - } - - cequalNan := int32(0) - if equalNan { - cequalNan = int32(1) - } - retVal = lib.AtgAllclose(ts.ctensor, other.ctensor, rtol, atol, cequalNan) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func AlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AlphaDropout_(p float64, train bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Amax(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAmax(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return 
retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAmaxOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Amin(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAmin(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAminOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Aminmax(dim []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAminmax(ctensorPtr0, ts.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) AminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAminmaxOut(ctensorPtr0, min.ctensor, max.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Angle(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAngle(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AngleOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor) - if err = 
TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Any(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAny(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AnyAllOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAnyAllOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AnyDim(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAnyDim(ptr, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AnyOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Arange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func ArangeOut(out *Tensor, end *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeOut(ptr, out.ctensor, end.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func ArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeStart(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func ArangeStartOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeStartOut(ptr, out.ctensor, start.cscalar, end.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func ArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeStartStep(ptr, start.cscalar, end.cscalar, step.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arccos(del bool) (retVal *Tensor, err error) { - if del { - 
defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArccos(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arccos_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArccos_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ArccosOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArccosOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arccosh(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArccosh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arccosh_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArccosh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ArccoshOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArccoshOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arcsin(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArcsin(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arcsin_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArcsin_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ArcsinOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArcsinOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arcsinh(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArcsinh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arcsinh_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArcsinh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ArcsinhOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArcsinhOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arctan(del bool) (retVal *Tensor, err error) { - if del { - 
defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArctan(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arctan_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArctan_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ArctanOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArctanOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arctanh(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArctanh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Arctanh_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArctanh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ArctanhOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArctanhOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Argmax(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgArgmax(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgArgmaxOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Argmin(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgArgmin(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ArgminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var 
cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgArgminOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Argsort(dim int64, descending bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cdescending := int32(0) - if descending { - cdescending = int32(1) - } - lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cstorageOffsetVal int64 = 0 - var cstorageOffsetNull int = 1 - if len(storageOffset) > 0 { - cstorageOffsetVal = storageOffset[0] - cstorageOffsetNull = 0 - } - lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset []int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cstorageOffsetVal int64 = 0 - var cstorageOffsetNull int = 1 - if len(storageOffset) > 0 { - cstorageOffsetVal = storageOffset[0] - cstorageOffsetNull = 0 - } - lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Asin(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsin(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Asin_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsin_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AsinOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Asinh(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsinh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Asinh_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsinh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AsinhOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsinhOut(ptr, out.ctensor, 
ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atan(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atan2(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan2(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atan2_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Atan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atan_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AtanOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atanh(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtanh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atanh_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtanh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) AtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtanhOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atleast1d(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtleast1d(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atleast2d(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtleast2d(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Atleast3d(del bool) (retVal *Tensor, err 
error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtleast3d(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - ccountIncludePad := int32(0) - if countIncludePad { - ccountIncludePad = int32(1) - } - lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - ccountIncludePad := int32(0) - if countIncludePad { - ccountIncludePad = int32(1) - } - var cdivisorOverrideVal int64 = 0 - var cdivisorOverrideNull int = 1 - if len(divisorOverride) > 0 { - cdivisorOverrideVal = divisorOverride[0] - cdivisorOverrideNull = 0 - } - lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - ccountIncludePad := int32(0) - if countIncludePad { - ccountIncludePad = int32(1) - } - var cdivisorOverrideVal int64 = 0 - var cdivisorOverrideNull int = 1 - if len(divisorOverride) > 0 { - cdivisorOverrideVal = divisorOverride[0] - cdivisorOverrideNull = 0 - } - lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) AvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - ccountIncludePad := int32(0) - if countIncludePad { - ccountIncludePad = int32(1) - } - var cdivisorOverrideVal int64 = 0 - var cdivisorOverrideNull int = 1 - if len(divisorOverride) > 0 { - cdivisorOverrideVal = divisorOverride[0] - cdivisorOverrideNull = 0 - } - lib.AtgAvgPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, 
-func (ts *Tensor) AvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	ccountIncludePad := int32(0)
-	if countIncludePad {
-		ccountIncludePad = int32(1)
-	}
-	var cdivisorOverrideVal int64 = 0
-	var cdivisorOverrideNull int = 1
-	if len(divisorOverride) > 0 {
-		cdivisorOverrideVal = divisorOverride[0]
-		cdivisorOverrideNull = 0
-	}
-	lib.AtgAvgPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	ccountIncludePad := int32(0)
-	if countIncludePad {
-		ccountIncludePad = int32(1)
-	}
-	var cdivisorOverrideVal int64 = 0
-	var cdivisorOverrideNull int = 1
-	if len(divisorOverride) > 0 {
-		cdivisorOverrideVal = divisorOverride[0]
-		cdivisorOverrideNull = 0
-	}
-	lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	ccountIncludePad := int32(0)
-	if countIncludePad {
-		ccountIncludePad = int32(1)
-	}
-	var cdivisorOverrideVal int64 = 0
-	var cdivisorOverrideNull int = 1
-	if len(divisorOverride) > 0 {
-		cdivisorOverrideVal = divisorOverride[0]
-		cdivisorOverrideNull = 0
-	}
-	lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	ccountIncludePad := int32(0)
-	if countIncludePad {
-		ccountIncludePad = int32(1)
-	}
-	var cdivisorOverrideVal int64 = 0
-	var cdivisorOverrideNull int = 1
-	if len(divisorOverride) > 0 {
-		cdivisorOverrideVal = divisorOverride[0]
-		cdivisorOverrideNull = 0
-	}
-	lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) AvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	ccountIncludePad := int32(0)
-	if countIncludePad {
-		ccountIncludePad = int32(1)
-	}
-	var cdivisorOverrideVal int64 = 0
-	var cdivisorOverrideNull int = 1
-	if len(divisorOverride) > 0 {
-		cdivisorOverrideVal = divisorOverride[0]
-		cdivisorOverrideNull = 0
-	}
-	lib.AtgAvgPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	ccountIncludePad := int32(0)
-	if countIncludePad {
-		ccountIncludePad = int32(1)
-	}
-	var cdivisorOverrideVal int64 = 0
-	var cdivisorOverrideNull int = 1
-	if len(divisorOverride) > 0 {
-		cdivisorOverrideVal = divisorOverride[0]
-		cdivisorOverrideNull = 0
-	}
-	lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Baddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Baddbmm_(batch1 *Tensor, batch2 *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgBartlettWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
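[Editor's note on the generated calling convention, illustrated by the AvgPool wrappers above: Go bool flags are lowered to C int32, and an optional int64 such as divisorOverride travels as a (value, null-flag) pair derived from a zero-or-one-element slice. A minimal, self-contained sketch of that convention; the helper names below are hypothetical and not part of the generated API:]

    package main

    import "fmt"

    // cbool mirrors how the generated wrappers encode a Go bool for the C ABI.
    func cbool(b bool) int32 {
    	if b {
    		return 1
    	}
    	return 0
    }

    // optionalInt64 mirrors the (value, null) pair built from a []int64:
    // an empty slice means "absent" (null = 1), one element means "present".
    func optionalInt64(v []int64) (val int64, null int) {
    	null = 1
    	if len(v) > 0 {
    		val, null = v[0], 0
    	}
    	return val, null
    }

    func main() {
    	val, null := optionalInt64(nil) // no divisor override supplied
    	fmt.Println(cbool(true), val, null) // 1 0 1
    }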
-func BatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	ccudnnEnabled := int32(0)
-	if cudnnEnabled {
-		ccudnnEnabled = int32(1)
-	}
-	lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cinputG := int32(0)
-	if inputG {
-		cinputG = int32(1)
-	}
-	cweightG := int32(0)
-	if weightG {
-		cweightG = int32(1)
-	}
-	cbiasG := int32(0)
-	if biasG {
-		cbiasG = int32(1)
-	}
-	lib.AtgBatchNormBackwardReduce(ctensorPtr0, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-
-	return retVal0, retVal1, retVal2, retVal3, err
-}
-
-func BatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgBatchNormGatherStats(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func BatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgBatchNormGatherStatsWithCounts(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func BatchNormStats(input *Tensor, eps float64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgBatchNormStats(ctensorPtr0, input.ctensor, eps)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func BatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgBatchNormUpdateStats(ctensorPtr0, input.ctensor, runningMean.ctensor, runningVar.ctensor, momentum)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
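[Editor's note on the multi-result wrappers above (BatchNormBackwardReduce, BatchNormGatherStats, etc.): each of N returned tensors is read from a consecutive slot, where slot i's address is slot 0's address plus i * sizeof(pointer). A small pure-Go sketch of that offset arithmetic; the local array below merely stands in for the buffer the real bindings hand to libtorch:]

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	// Four consecutive slots, as for a wrapper returning four tensors.
    	var slots [4]uintptr
    	p0 := unsafe.Pointer(&slots[0])
    	for i := 1; i < len(slots); i++ {
    		// Same arithmetic as ctensorPtr1 = ctensorPtr0 + Sizeof(ctensorPtr0).
    		pi := unsafe.Add(p0, uintptr(i)*unsafe.Sizeof(slots[0]))
    		fmt.Printf("slot %d at byte offset %d\n", i, uintptr(pi)-uintptr(p0))
    	}
    }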
-func (ts *Tensor) Bernoulli(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBernoulli(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Bernoulli_(p *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BernoulliFloat_(p float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBernoulliFloat_(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BernoulliOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBernoulliOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BernoulliP(p float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBernoulliP(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Bilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinaryCrossEntropyBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinaryCrossEntropyWithLogitsBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Binomial(count *Tensor, prob *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBinomial(ptr, count.ctensor, prob.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseAnd(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseAnd_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseAndScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseAndScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseAndTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseAndTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseAndTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseAndTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseAndTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseAndTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseLeftShift(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShift(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseLeftShift_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShift_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func BitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseLeftShiftTensorScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShiftTensorScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseLeftShiftTensorScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShiftTensorScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseLeftShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseNot(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseNot(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseNot_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseNot_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseNotOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseOr(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseOr_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseOrScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseOrScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseOrTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseOrTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseOrTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseOrTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseOrTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseOrTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseRightShift(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShift(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseRightShift_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShift_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func BitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseRightShiftTensorScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShiftTensorScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseRightShiftTensorScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShiftTensorScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseRightShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseXor(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseXor_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseXorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseXorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseXorTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseXorTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BitwiseXorTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseXorTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) BitwiseXorTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBitwiseXorTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgBlackmanWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BlockDiag(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgBlockDiag(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Bmm(mat2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BroadcastTo(size []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgBroadcastTo(ptr, ts.ctensor, size, len(size))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Bucketize(boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	coutInt32 := int32(0)
-	if outInt32 {
-		coutInt32 = int32(1)
-	}
-	cright := int32(0)
-	if right {
-		cright = int32(1)
-	}
-	lib.AtgBucketize(ptr, ts.ctensor, boundaries.ctensor, coutInt32, cright)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func BucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	coutInt32 := int32(0)
-	if outInt32 {
-		coutInt32 = int32(1)
-	}
-	cright := int32(0)
-	if right {
-		cright = int32(1)
-	}
-	lib.AtgBucketizeScalar(ptr, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) BucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	coutInt32 := int32(0)
-	if outInt32 {
-		coutInt32 = int32(1)
-	}
-	cright := int32(0)
-	if right {
-		cright = int32(1)
-	}
-	lib.AtgBucketizeTensorOut(ptr, out.ctensor, ts.ctensor, boundaries.ctensor, coutInt32, cright)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CanCast(from gotch.DType, to gotch.DType) (retVal bool, err error) {
-
-	retVal = lib.AtgCanCast(from.CInt(), to.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func CartesianProd(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgCartesianProd(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Cat(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgCat(ptr, ctensors, len(ctensors), dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cauchy_(median float64, sigma float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCauchy_(ptr, ts.ctensor, median, sigma)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ccomputeModeVal int64 = 0
-	var ccomputeModeNull int = 1
-	if len(computeMode) > 0 {
-		ccomputeModeVal = computeMode[0]
-		ccomputeModeNull = 0
-	}
-	lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, ccomputeModeVal, ccomputeModeNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ceil(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCeil(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ceil_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCeil_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) CeilOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Celu(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCelu(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Celu_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCelu_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func ChainMatmul(matrices []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cmatrices []lib.Ctensor
-	for _, t := range matrices {
-		cmatrices = append(cmatrices, t.ctensor)
-	}
-	lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ChainMatmulOut(out *Tensor, matrices []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cmatrices []lib.Ctensor
-	for _, t := range matrices {
-		cmatrices = append(cmatrices, t.ctensor)
-	}
-	lib.AtgChainMatmulOut(ptr, out.ctensor, cmatrices, len(cmatrices))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ChannelShuffle(groups int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgChannelShuffle(ptr, ts.ctensor, groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cholesky(upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgCholesky(ptr, ts.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CholeskyInverse(upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CholeskyInverseOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CholeskySolve(input2 *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgChooseQparamsOptimized(ctensorPtr0, input.ctensor, numel, nBins, ratio, bitWidth)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Clamp(min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Clamp_(min *Scalar, max *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClampMax(max *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMax(ptr, ts.ctensor, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMax_(max *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClampMaxOut(out *Tensor, max *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMaxTensor(max *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMaxTensor(ptr, ts.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMaxTensor_(max *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMaxTensor_(ptr, ts.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClampMaxTensorOut(out *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMaxTensorOut(ptr, out.ctensor, ts.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMin(min *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMin(ptr, ts.ctensor, min.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMin_(min *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClampMinOut(out *Tensor, min *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMinTensor(min *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMinTensor(ptr, ts.ctensor, min.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampMinTensor_(min *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMinTensor_(ptr, ts.ctensor, min.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClampMinTensorOut(out *Tensor, min *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampMinTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampTensor(min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampTensor(ptr, ts.ctensor, min.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClampTensor_(min *Tensor, max *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClampTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Clip(min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClip(ptr, ts.ctensor, min.cscalar, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Clip_(min *Scalar, max *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClip_(ptr, ts.ctensor, min.cscalar, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClipOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClipOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClipTensor(min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClipTensor(ptr, ts.ctensor, min.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ClipTensor_(min *Tensor, max *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClipTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgClipTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Coalesce(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCoalesce(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCol2im(ptr, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Col2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCol2imBackward(ptr, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Col2imBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCol2imBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Col2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ColIndices(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgColIndices(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ColumnStack(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgColumnStack(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ColumnStackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgColumnStackOut(ptr, out.ctensor, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Combinations(r int64, withReplacement bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cwithReplacement := int32(0)
-	if withReplacement {
-		cwithReplacement = int32(1)
-	}
-	lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Complex(real *Tensor, imag *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgComplex(ptr, real.ctensor, imag.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ComplexOut(out *Tensor, real *Tensor, imag *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgComplexOut(ptr, out.ctensor, real.ctensor, imag.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Concat(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgConcat(ptr, ctensors, len(ctensors), dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ConcatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgConcatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Conj(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConj(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ConjPhysical(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConjPhysical(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ConjPhysical_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConjPhysical_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ConjPhysicalOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConjPhysicalOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ConstantPadNd(pad []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConstantPadNd(ptr, ts.ctensor, pad, len(pad))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Contiguous(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgContiguous(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Conv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Conv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConv1dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Conv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Conv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConv2dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Conv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv3dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvDepthwise3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ConvDepthwise3dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgConvDepthwise3dBackward(ctensorPtr0, gradInput.ctensor, gradWeight.ctensor, gradBias.ctensor, gradOutput.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) ConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgConvTbcBackward(ctensorPtr0, ts.ctensor, input.ctensor, weight.ctensor, bias.ctensor, pad) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: 
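ConvDepthwise3dBackward and ConvTbcBackward above show the generator's convention for multi-tensor returns: the C shim writes the result handles contiguously starting at the first pointer, and the Go side derives the sibling slots by pointer arithmetic instead of allocating an array. The layout assumption, as a hypothetical helper (not part of the generated API):

    // Slot i of an n-tensor result lives at base + i*sizeof(pointer),
    // mirroring the ctensorPtr0/1/2 chain in the generated code above.
    func resultSlot(base *lib.Ctensor, i uintptr) *lib.Ctensor {
        return (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(base)) + i*unsafe.Sizeof(base)))
    }
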
-func ConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctransposed := int32(0)
-	if transposed {
-		ctransposed = int32(1)
-	}
-	lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctransposed := int32(0)
-	if transposed {
-		ctransposed = int32(1)
-	}
-	lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CopySparseToSparse_(src *Tensor, nonBlocking bool) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cnonBlocking := int32(0)
-	if nonBlocking {
-		cnonBlocking = int32(1)
-	}
-	lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Copysign(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCopysign(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Copysign_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCopysign_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) CopysignOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCopysignOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CopysignScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCopysignScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CopysignScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCopysignScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) CopysignScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCopysignScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Corrcoef(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCorrcoef(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cos(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCos(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cos_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCos_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) CosOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCosOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cosh(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCosh(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cosh_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCosh_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) CoshOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CountNonzero(dim []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	lib.AtgCountNonzero(ptr, ts.ctensor, cdimVal, cdimNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
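CountNonzero above shows how the generator encodes an optional scalar argument: `dim []int64` is treated as a zero- or one-element option and lowered to a (value, null-flag) pair for the C call. The inlined cdimVal/cdimNull logic is equivalent to this hypothetical helper:

    // Empty slice -> None (null=1); one element -> Some(v) (null=0).
    func int64Option(v []int64) (val int64, null int) {
        if len(v) > 0 {
            return v[0], 0
        }
        return 0, 1
    }
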
-func (ts *Tensor) CountNonzeroDimIntlist(dim []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCountNonzeroDimIntlist(ptr, ts.ctensor, dim, len(dim))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cov(correction int64, fweights *Tensor, aweights *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCov(ptr, ts.ctensor, correction, fweights.ctensor, aweights.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cross(other *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	lib.AtgCross(ptr, ts.ctensor, other.ctensor, cdimVal, cdimNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCrossEntropyLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, labelSmoothing)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, cdimVal, cdimNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CrowIndices(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCrowIndices(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	czeroInfinity := int32(0)
-	if zeroInfinity {
-		czeroInfinity = int32(1)
-	}
-	lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, reduction, czeroInfinity)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	czeroInfinity := int32(0)
-	if zeroInfinity {
-		czeroInfinity = int32(1)
-	}
-	lib.AtgCtcLossTensor(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgCudnnBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-
-	return retVal0, retVal1, retVal2, retVal3, err
-}
-
-func CudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgCudnnBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func (ts *Tensor) CudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	callowTf32 := int32(0)
-	if allowTf32 {
-		callowTf32 = int32(1)
-	}
-	lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCudnnConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	callowTf32 := int32(0)
-	if allowTf32 {
-		callowTf32 = int32(1)
-	}
-	lib.AtgCudnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
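Since the C shim takes plain ints for flags, every Go bool in these cuDNN wrappers (training, benchmark, deterministic, allowTf32, ...) is lowered to an int32 of 0 or 1. The inlined conversions are all instances of this hypothetical helper:

    func cbool(b bool) int32 {
        if b {
            return 1
        }
        return 0
    }
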
-func (ts *Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	callowTf32 := int32(0)
-	if allowTf32 {
-		callowTf32 = int32(1)
-	}
-	lib.AtgCudnnConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionDeprecated(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	lib.AtgCudnnConvolutionDeprecated(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionDeprecated2(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	lib.AtgCudnnConvolutionDeprecated2(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCudnnConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	callowTf32 := int32(0)
-	if allowTf32 {
-		callowTf32 = int32(1)
-	}
-	lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	callowTf32 := int32(0)
-	if allowTf32 {
-		callowTf32 = int32(1)
-	}
-	lib.AtgCudnnConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	callowTf32 := int32(0)
-	if allowTf32 {
-		callowTf32 = int32(1)
-	}
-	lib.AtgCudnnConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionTransposeDeprecated(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	lib.AtgCudnnConvolutionTransposeDeprecated(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnConvolutionTransposeDeprecated2(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbenchmark := int32(0)
-	if benchmark {
-		cbenchmark = int32(1)
-	}
-	cdeterministic := int32(0)
-	if deterministic {
-		cdeterministic = int32(1)
-	}
-	lib.AtgCudnnConvolutionTransposeDeprecated2(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgCudnnGridSamplerBackward(ctensorPtr0, ts.ctensor, grid.ctensor, gradOutput.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) CudnnIsAcceptable(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgCudnnIsAcceptable(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) Cummax(dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgCummax(ctensorPtr0, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) CummaxOut(values *Tensor, indices *Tensor, dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgCummaxOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func CummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCummaxminBackward(ptr, grad.ctensor, input.ctensor, indices.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cummin(dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgCummin(ctensorPtr0, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) CumminOut(values *Tensor, indices *Tensor, dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgCumminOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cumprod_(dim int64, dtype gotch.DType) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumprod_(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func CumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumprodBackward(ptr, grad.ctensor, input.ctensor, dim, output.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cumsum(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Cumsum_(dim int64, dtype gotch.DType) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumsum_(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) CumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
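Methods with a trailing underscore (Cumsum_, Cumprod_, ...) are the in-place variants: they overwrite the receiver's C handle and return only an error. A usage sketch, assuming this repo's `ts` and `gotch` packages (values hypothetical):

    t := ts.MustArange(ts.IntScalar(5), gotch.Float, gotch.CPU) // [0 1 2 3 4]
    if err := t.Cumsum_(0, gotch.Float); err != nil {
        log.Fatal(err)
    }
    fmt.Println(t.Float64Values()) // [0 1 3 6 10]
    t.MustDrop()
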
-func CumulativeTrapezoid(y *Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumulativeTrapezoid(ptr, y.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func CumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgCumulativeTrapezoidX(ptr, y.ctensor, x.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Data(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgData(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Deg2rad(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDeg2rad(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Deg2rad_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDeg2rad_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Deg2radOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDeg2radOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DenseDim(del bool) (retVal int64, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgDenseDim(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) Dequantize(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDequantize(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Det(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDet(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Detach(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDetach(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Detach_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDetach_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Diag(diagonal int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiag(ptr, ts.ctensor, diagonal)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func DiagBackward(grad *Tensor, inputSizes []int64, diagonal int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiagBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), diagonal)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DiagOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Diagflat(offset int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiagflat(ptr, ts.ctensor, offset)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func DiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiagonalBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), offset, dim1, dim2)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Diff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiff(ptr, ts.ctensor, n, dim, prepend.ctensor, append.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiffOut(ptr, out.ctensor, ts.ctensor, n, dim, prepend.ctensor, append.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Digamma(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDigamma(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Digamma_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDigamma_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DigammaOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Dist(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDist(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Div(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiv(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Div_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDiv_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DivScalarMode(other *Scalar, roundingMode string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivScalarMode_(other *Scalar, roundingMode string) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DivTensorMode(other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivTensorMode_(other *Tensor, roundingMode string) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Divide(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivide(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Divide_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivide_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivideScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivideScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DivideScalarMode(other *Scalar, roundingMode string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivideScalarMode_(other *Scalar, roundingMode string) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) DivideTensorMode(other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DivideTensorMode_(other *Tensor, roundingMode string) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDivideTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
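The `...Mode` variants thread PyTorch's rounding_mode option through as a plain string; upstream accepts "trunc" or "floor" for it. A one-line sketch (operand `a` hypothetical):

    q, err := a.DivScalarMode(ts.FloatScalar(3.0), "floor", false) // floor division by 3
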
-func (ts *Tensor) Dot(tensor *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDot(ptr, ts.ctensor, tensor.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) DotOut(out *Tensor, tensor *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Dropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctrain := int32(0)
-	if train {
-		ctrain = int32(1)
-	}
-	lib.AtgDropout(ptr, input.ctensor, p, ctrain)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Dropout_(p float64, train bool) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctrain := int32(0)
-	if train {
-		ctrain = int32(1)
-	}
-	lib.AtgDropout_(ptr, ts.ctensor, p, ctrain)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func Dstack(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgDstack(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func DstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgDstackOut(ptr, out.ctensor, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
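The variadic-tensor entry points here (Dstack, DstackOut, and ColumnStack/Concat/Einsum elsewhere in this file) all marshal a Go `[]Tensor` into a flat `[]lib.Ctensor` plus an explicit length, as the append loop shows. Usage sketch (shapes hypothetical):

    a := ts.MustOnes([]int64{2, 2}, gotch.Float, gotch.CPU)
    b := ts.MustZeros([]int64{2, 2}, gotch.Float, gotch.CPU)
    d, err := ts.Dstack([]ts.Tensor{*a, *b}) // result shape [2 2 2]
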
-func (ts *Tensor) Eig(eigenvectors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ceigenvectors := int32(0)
-	if eigenvectors {
-		ceigenvectors = int32(1)
-	}
-	lib.AtgEig(ctensorPtr0, ts.ctensor, ceigenvectors)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) EigE(e *Tensor, v *Tensor, eigenvectors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ceigenvectors := int32(0)
-	if eigenvectors {
-		ceigenvectors = int32(1)
-	}
-	lib.AtgEigE(ctensorPtr0, e.ctensor, v.ctensor, ts.ctensor, ceigenvectors)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func Einsum(equation string, tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgEinsum(ptr, equation, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Elu(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgElu(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Elu_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgElu_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func EluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cisResult := int32(0)
-	if isResult {
-		cisResult = int32(1)
-	}
-	lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func EluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cisResult := int32(0)
-	if isResult {
-		cisResult = int32(1)
-	}
-	lib.AtgEluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) EluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgEluOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Embedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cscaleGradByFreq := int32(0)
-	if scaleGradByFreq {
-		cscaleGradByFreq = int32(1)
-	}
-	csparse := int32(0)
-	if sparse {
-		csparse = int32(1)
-	}
-	lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func EmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cscaleGradByFreq := int32(0)
-	if scaleGradByFreq {
-		cscaleGradByFreq = int32(1)
-	}
-	csparse := int32(0)
-	if sparse {
-		csparse = int32(1)
-	}
-	lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cscaleGradByFreq := int32(0)
-	if scaleGradByFreq {
-		cscaleGradByFreq = int32(1)
-	}
-	csparse := int32(0)
-	if sparse {
-		csparse = int32(1)
-	}
-	cincludeLastOffset := int32(0)
-	if includeLastOffset {
-		cincludeLastOffset = int32(1)
-	}
-	lib.AtgEmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-
-	return retVal0, retVal1, retVal2, retVal3, err
-}
-
-func EmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cscaleGradByFreq := int32(0)
-	if scaleGradByFreq {
-		cscaleGradByFreq = int32(1)
-	}
-	csparse := int32(0)
-	if sparse {
-		csparse = int32(1)
-	}
-	cincludeLastOffset := int32(0)
-	if includeLastOffset {
-		cincludeLastOffset = int32(1)
-	}
-	var cpaddingIdxVal int64 = 0
-	var cpaddingIdxNull int = 1
-	if len(paddingIdx) > 0 {
-		cpaddingIdxVal = paddingIdx[0]
-		cpaddingIdxNull = 0
-	}
-	lib.AtgEmbeddingBagPaddingIdx(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, cpaddingIdxVal, cpaddingIdxNull)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-
-	return retVal0, retVal1, retVal2, retVal3, err
-}
-
-func EmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cscaleGradByFreq := int32(0)
-	if scaleGradByFreq {
-		cscaleGradByFreq = int32(1)
-	}
-	lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
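Embedding and EmbeddingBag take the weight table as an explicit first argument rather than a receiver, and EmbeddingBag returns four tensors, the first being the reduced output. A lookup sketch, assuming this repo's helper constructors (all values hypothetical):

    table := ts.MustRandn([]int64{10, 4}, gotch.Float, gotch.CPU)
    idx := ts.MustOfSlice([]int64{1, 3, 5})
    emb, err := ts.Embedding(table, idx, -1, false, false) // shape [3 4]; -1 = no padding row
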
:= (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func EmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { - cscaleGradByFreq = int32(1) - } - lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmpty(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) EmptyLike(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmptyOut(out *Tensor, size []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyQuantized(ptr, size, len(size), qtensor.ctensor, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyStrided(ptr, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Eq(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEq(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Eq_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEq_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) EqScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEqScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) 
EqTensor(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEqTensor(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) EqTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEqTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) EqTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEqTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Equal(other *Tensor, del bool) (retVal bool, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.AtgEqual(ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) Erf(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErf(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Erf_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErf_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ErfOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Erfc(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfc(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Erfc_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfc_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ErfcOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Erfinv(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfinv(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Erfinv_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfinv_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ErfinvOut(out *Tensor, del bool) 
(retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Exp(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Exp2(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp2(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Exp2_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp2_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Exp2Out(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp2Out(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Exp_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ExpOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Expand(size []int64, implicit bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cimplicit := int32(0) - if implicit { - cimplicit = int32(1) - } - lib.AtgExpand(ptr, ts.ctensor, size, len(size), cimplicit) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ExpandAs(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Expm1(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpm1(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Expm1_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpm1_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Expm1Out(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpm1Out(ptr, 
out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Exponential_(lambd float64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExponential_(ptr, ts.ctensor, lambd) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func EyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEyeM(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func EyeMOut(out *Tensor, n int64, m int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEyeMOut(ptr, out.ctensor, n, m) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func EyeOut(out *Tensor, n int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEyeOut(ptr, out.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFakeQuantizePerChannelAffineCachemask(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func FakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerChannelAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFakeQuantizePerTensorAffineCachemask(ctensorPtr0, ts.ctensor, scale, zeroPoint, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func FakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerTensorAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerTensorAffineTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) - if err = TorchErr(); err 
!= nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmPackGemmMatrixFp16(input *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmPackQuantizedMatrix(input *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmPackQuantizedMatrixKn(ptr, input.ctensor, k, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FeatureAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FeatureAlphaDropout_(p float64, train bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func FeatureDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FeatureDropout_(p float64, train bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { - ctrain = int32(1) - } - lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FftFft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftFft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftFft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftFftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FftFftfreqOut(out *Tensor, n int64, d float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFftfreqOut(ptr, out.ctensor, n, d) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftFftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftFftshift(dim []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftFftshift(ptr, ts.ctensor, dim, len(dim)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftHfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftHfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftHfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} 
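
For orientation while reading these regenerated FFT wrappers: each one mallocs a slot for the returned C tensor handle, calls the matching lib.Atg* binding, checks TorchErr(), and wraps the handle in a Go Tensor; del=true drops the receiver after the call, and an optional scalar argument such as n travels as a 0- or 1-element slice that the wrapper unpacks into a (value, null-flag) pair. A minimal usage sketch under those conventions — ts.MustOfSlice, MustSize, and MustDrop are helpers from elsewhere in this package, and the snippet is an editor's illustration of the calling convention, not part of this diff:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch/ts"
    )

    func main() {
        x := ts.MustOfSlice([]float64{0, 1, 2, 3}) // small 1-D Double tensor
        // An empty slice for n means "null" (cnNull = 1): use the full signal length.
        // dim = -1 transforms the last axis; norm takes PyTorch's "backward"/"forward"/"ortho".
        spec, err := x.FftFft([]int64{}, -1, "backward", true) // del=true: x is dropped after the call
        if err != nil {
            panic(err)
        }
        fmt.Println(spec.MustSize()) // [4]; values are complex
        spec.MustDrop()              // caller frees the result explicitly
    }
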
- -func (ts *Tensor) FftIfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftIfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftIfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIfftshift(dim []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIfftshift(ptr, ts.ctensor, dim, len(dim)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIhfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftIhfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer 
ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftIhfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIrfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftIrfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIrfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIrfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIrfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftIrfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIrfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIrfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftIrfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftRfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftRfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftRfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - 
defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftRfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftRfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - lib.AtgFftRfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftRfftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FftRfftfreqOut(out *Tensor, n int64, d float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftRfftfreqOut(ptr, out.ctensor, n, d) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftRfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftRfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFftRfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fill_(value *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFill_(ptr, ts.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cwrap := int32(0) - if wrap { - cwrap = int32(1) - } - lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FillTensor_(value *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFillTensor_(ptr, ts.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) 
Fix(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFix(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fix_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFix_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FixOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFixOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Flatten(startDim int64, endDim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FlattenDenseTensors(tensors []Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.AtgFlattenDenseTensors(ptr, ctensors, len(ctensors)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Flip(dims []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFlip(ptr, ts.ctensor, dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fliplr(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFliplr(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Flipud(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFlipud(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloatPower(exponent *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPower(ptr, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloatPower_(exponent *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPower_(ptr, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func FloatPowerScalar(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPowerScalar(ptr, selfScalar.cscalar, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err 
-} - -func FloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPowerScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloatPowerTensor_(exponent *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPowerTensor_(ptr, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FloatPowerTensorScalar(exponent *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPowerTensorScalar(ptr, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPowerTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloatPowerTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Floor(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloor(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Floor_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloor_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FloorDivide(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloorDivide_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FloorDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloorDivideScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivideScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FloorDivideScalar_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivideScalar_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FloorOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fmax(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmax(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FmaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fmin(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmin(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FminOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFminOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fmod(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmod(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Fmod_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmod_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FmodScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmodScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FmodTensor(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmodTensor(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
&Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FmodTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmodTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FmodTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmodTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Frac(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFrac(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Frac_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFrac_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) FracOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFracOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFractionalMaxPool2d(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) FractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, 
retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFractionalMaxPool2dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) FractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFractionalMaxPool3d(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) FractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFractionalMaxPool3dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Frexp(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFrexp(ctensorPtr0, ts.ctensor) - 
if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) FrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgFrexpTensorOut(ctensorPtr0, mantissa.ctensor, exponent.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) FrobeniusNorm(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFrobeniusNorm(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FrobeniusNormDim(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgFrobeniusNormDim(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cshared := int32(0) - if shared { - cshared = int32(1) - } - var csizeVal int64 = 0 - var csizeNull int = 1 - if len(size) > 0 { - csizeVal = size[0] - csizeNull = 0 - } - lib.AtgFromFile(ptr, filename, cshared, csizeVal, csizeNull, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Full(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFull(ptr, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FullLike(fillValue *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func FullOut(out *Tensor, size []int64, fillValue *Scalar) (retVal *Tensor, err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFullOut(ptr, out.ctensor, size, len(size), fillValue.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) FusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperRowFakeQuant := int32(0) - if perRowFakeQuant { - cperRowFakeQuant = int32(1) - } - csymmetricQuant := int32(0) - if symmetricQuant { - csymmetricQuant = int32(1) - } - lib.AtgFusedMovingAvgObsFakeQuant(ptr, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csparseGrad := int32(0) - if sparseGrad { - csparseGrad = int32(1) - } - lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) GatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csparseGrad := int32(0) - if sparseGrad { - csparseGrad = int32(1) - } - lib.AtgGatherBackward(ptr, grad.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csparseGrad := int32(0) - if sparseGrad { - csparseGrad = int32(1) - } - lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Gcd(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGcd(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Gcd_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGcd_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) GcdOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGcdOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - 
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ge(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGe(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ge_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGe_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GeTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GeTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Gelu(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGelu(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GeluBackward(grad *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeluBackward(ptr, grad.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GeluBackwardGradInput(gradInput *Tensor, grad *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeluBackwardGradInput(ptr, gradInput.ctensor, grad.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GeluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeluOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Geometric_(p float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGeometric_(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Geqrf(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgGeqrf(ctensorPtr0, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) GeqrfA(a *Tensor, tau *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgGeqrfA(ctensorPtr0, a.ctensor, tau.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Ger(vec2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGer(ptr, ts.ctensor, vec2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GerOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Glu(dim int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGlu(ptr, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GluBackward(gradOutput *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GluOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Grad(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGrad(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Greater(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreater(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Greater_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreater_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GreaterEqual(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterEqual(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GreaterEqual_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterEqual_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GreaterEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GreaterEqualTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterEqualTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GreaterEqualTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterEqualTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GreaterEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GreaterScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GreaterTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GreaterTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GreaterTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGreaterTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	calignCorners := int32(0)
-	if alignCorners {
-		calignCorners = int32(1)
-	}
-	lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func GridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	calignCorners := int32(0)
-	if alignCorners {
-		calignCorners = int32(1)
-	}
-	lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func GridSampler2dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	calignCorners := int32(0)
-	if alignCorners {
-		calignCorners = int32(1)
-	}
-	lib.AtgGridSampler2dBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func GridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	calignCorners := int32(0)
-	if alignCorners {
-		calignCorners = int32(1)
-	}
-	lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func GridSampler3dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	calignCorners := int32(0)
-	if alignCorners {
-		calignCorners = int32(1)
-	}
-	lib.AtgGridSampler3dBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func GroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ccudnnEnabled := int32(0)
-	if cudnnEnabled {
-		ccudnnEnabled = int32(1)
-	}
-	lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Gru(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	var cparams []lib.Ctensor
-	for _, t := range params {
-		cparams = append(cparams, t.ctensor)
-	}
-	chasBiases := int32(0)
-	if hasBiases {
-		chasBiases = int32(1)
-	}
-	ctrain := int32(0)
-	if train {
-		ctrain = int32(1)
-	}
-	cbidirectional := int32(0)
-	if bidirectional {
-		cbidirectional = int32(1)
-	}
-	cbatchFirst := int32(0)
-	if batchFirst {
-		cbatchFirst = int32(1)
-	}
-	lib.AtgGru(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func GruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func GruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	var cparams []lib.Ctensor
-	for _, t := range params {
-		cparams = append(cparams, t.ctensor)
-	}
-	chasBiases := int32(0)
-	if hasBiases {
-		chasBiases = int32(1)
-	}
-	ctrain := int32(0)
-	if train {
-		ctrain = int32(1)
-	}
-	cbidirectional := int32(0)
-	if bidirectional {
-		cbidirectional = int32(1)
-	}
-	lib.AtgGruData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Gt(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGt(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Gt_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGt_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GtScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GtTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGtTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) GtTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGtTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) GtTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgGtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgHammingWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgHammingWindowPeriodicAlpha(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgHammingWindowPeriodicAlphaBeta(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgHannWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardshrink(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardshrink(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOut.ctensor, ts.ctensor, lambd.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardshrinkOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardshrinkOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardsigmoid(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardsigmoid(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardsigmoid_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardsigmoid_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) HardsigmoidBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardsigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardsigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardswish(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardswish(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardswish_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardswish_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) HardswishBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardswishBackward(ptr, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardswishOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardswishOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardtanh(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardtanh(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hardtanh_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardtanh_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) HardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardtanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HardtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Heaviside(values *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHeaviside(ptr, ts.ctensor, values.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Heaviside_(values *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHeaviside_(ptr, ts.ctensor, values.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) HeavisideOut(out *Tensor, values *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHeavisideOut(ptr, out.ctensor, ts.ctensor, values.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Histc(bins int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHistc(ptr, ts.ctensor, bins)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HistcOut(out *Tensor, bins int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Hspmm(mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Hstack(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgHstack(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func HstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgHstackOut(ptr, out.ctensor, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HuberLoss(target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHuberLoss(ptr, ts.ctensor, target.ctensor, reduction, delta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHuberLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHuberLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) HuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHuberLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, delta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hypot(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHypot(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Hypot_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHypot_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) HypotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgHypotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) I0(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgI0(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) I0_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgI0_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) I0Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgI0Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Igamma(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIgamma(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Igamma_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIgamma_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IgammaOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIgammaOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Igammac(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIgammac(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Igammac_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIgammac_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IgammacOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIgammacOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIm2col(ptr, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Im2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIm2colBackward(ptr, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Im2colBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIm2colBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Im2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Imag(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgImag(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexAdd_(dim int64, index *Tensor, source *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IndexAddAlpha(dim int64, index *Tensor, source *Tensor, alpha *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexAddAlpha(ptr, ts.ctensor, dim, index.ctensor, source.ctensor, alpha.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexAddAlpha_(dim int64, index *Tensor, source *Tensor, alpha *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexAddAlpha_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor, alpha.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IndexCopy(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexCopy_(dim int64, index *Tensor, source *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IndexFill(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexFill_(dim int64, index *Tensor, value *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexFillIntTensor(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexFillIntTensor_(dim int64, index *Tensor, value *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexFillIntTensor_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func IndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexSelectBackward(ptr, grad.ctensor, selfSizes, len(selfSizes), dim, index.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Indices(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIndices(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) InfinitelyDifferentiableGeluBackward(grad *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgInfinitelyDifferentiableGeluBackward(ptr, grad.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Inner(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgInner(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) InnerOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgInnerOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func InstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cuseInputStats := int32(0)
-	if useInputStats {
-		cuseInputStats = int32(1)
-	}
-	ccudnnEnabled := int32(0)
-	if cudnnEnabled {
-		ccudnnEnabled = int32(1)
-	}
-	lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IntRepr(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIntRepr(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Inverse(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgInverse(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) InverseOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IsCoalesced(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsCoalesced(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsComplex(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsComplex(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsConj(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsConj(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsDistributed(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsDistributed(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsFloatingPoint(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsFloatingPoint(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsInference(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsInference(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsLeaf(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsLeaf(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsNeg(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsNeg(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsNonzero(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsNonzero(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsPinned(device gotch.Device, del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsPinned(ts.ctensor, device.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsSameSize(other *Tensor, del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsSameSize(ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsSetTo(tensor *Tensor, del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsSetTo(ts.ctensor, tensor.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) IsSigned(del bool) (retVal bool, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgIsSigned(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func IsVulkanAvailable() (retVal bool, err error) {
-
-	retVal = lib.AtgIsVulkanAvailable()
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) Isclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cequalNan := int32(0)
-	if equalNan {
-		cequalNan = int32(1)
-	}
-	lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Isfinite(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsfinite(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Isin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cassumeUnique := int32(0)
-	if assumeUnique {
-		cassumeUnique = int32(1)
-	}
-	cinvert := int32(0)
-	if invert {
-		cinvert = int32(1)
-	}
-	lib.AtgIsin(ptr, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func IsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cassumeUnique := int32(0)
-	if assumeUnique {
-		cassumeUnique = int32(1)
-	}
-	cinvert := int32(0)
-	if invert {
-		cinvert = int32(1)
-	}
-	lib.AtgIsinScalarTensor(ptr, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func IsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cassumeUnique := int32(0)
-	if assumeUnique {
-		cassumeUnique = int32(1)
-	}
-	cinvert := int32(0)
-	if invert {
-		cinvert = int32(1)
-	}
-	lib.AtgIsinScalarTensorOut(ptr, out.ctensor, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func IsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cassumeUnique := int32(0)
-	if assumeUnique {
-		cassumeUnique = int32(1)
-	}
-	cinvert := int32(0)
-	if invert {
-		cinvert = int32(1)
-	}
-	lib.AtgIsinTensorScalar(ptr, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func IsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cassumeUnique := int32(0)
-	if assumeUnique {
-		cassumeUnique = int32(1)
-	}
-	cinvert := int32(0)
-	if invert {
-		cinvert = int32(1)
-	}
-	lib.AtgIsinTensorScalarOut(ptr, out.ctensor, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func IsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cassumeUnique := int32(0)
-	if assumeUnique {
-		cassumeUnique = int32(1)
-	}
-	cinvert := int32(0)
-	if invert {
-		cinvert = int32(1)
-	}
-	lib.AtgIsinTensorTensorOut(ptr, out.ctensor, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Isinf(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsinf(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Isnan(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsnan(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Isneginf(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsneginf(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IsneginfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsneginfOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Isposinf(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsposinf(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) IsposinfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsposinfOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Isreal(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgIsreal(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Istft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var chopLengthVal int64 = 0
-	var chopLengthNull int = 1
-	if len(hopLength) > 0 {
-		chopLengthVal = hopLength[0]
-		chopLengthNull = 0
-	}
-	var cwinLengthVal int64 = 0
-	var cwinLengthNull int = 1
-	if len(winLength) > 0 {
-		cwinLengthVal = winLength[0]
-		cwinLengthNull = 0
-	}
-	ccenter := int32(0)
-	if center {
-		ccenter = int32(1)
-	}
-	cnormalized := int32(0)
-	if normalized {
-		cnormalized = int32(1)
-	}
-	conesided := int32(0)
-	if onesided {
-		conesided = int32(1)
-	}
-	var clengthVal int64 = 0
-	var clengthNull int = 1
-	if len(length) > 0 {
-		clengthVal = length[0]
-		clengthNull = 0
-	}
-	creturnComplex := int32(0)
-	if returnComplex {
-		creturnComplex = int32(1)
-	}
-	lib.AtgIstft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func KaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgKaiserWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func KaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgKaiserWindowBeta(ptr, windowLength, cperiodic, beta, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func KaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cperiodic := int32(0)
-	if periodic {
-		cperiodic = int32(1)
-	}
-	lib.AtgKaiserWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
&Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) KlDiv(target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - clogTarget := int32(0) - if logTarget { - clogTarget = int32(1) - } - lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction, clogTarget) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) KlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - clogTarget := int32(0) - if logTarget { - clogTarget = int32(1) - } - lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, clogTarget) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Kron(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgKron(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) KronOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgKronOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Kthvalue(k int64, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgKthvalue(ctensorPtr0, ts.ctensor, k, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) KthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgKthvalueValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) L1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) 
L1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) L1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) L1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func LayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ccudnnEnable := int32(0) - if cudnnEnable { - ccudnnEnable = int32(1) - } - lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Lcm(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLcm(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Lcm_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLcm_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) LcmOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLcmOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Ldexp(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLdexp(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Ldexp_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLdexp_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) LdexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
-
-	lib.AtgLdexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Le(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLe(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Le_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLe_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LeTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LeTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LeakyRelu(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeakyRelu(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LeakyRelu_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeakyRelu_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cselfIsResult := int32(0)
-	if selfIsResult {
-		cselfIsResult = int32(1)
-	}
-	lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cselfIsResult := int32(0)
-	if selfIsResult {
-		cselfIsResult = int32(1)
-	}
-	lib.AtgLeakyReluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LeakyReluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Lerp(end *Tensor, weight *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Lerp_(end *Tensor, weight *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLerpScalarOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LerpTensor(end *Tensor, weight *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLerpTensor(ptr, ts.ctensor, end.ctensor, weight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LerpTensor_(end *Tensor, weight *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLerpTensor_(ptr, ts.ctensor, end.ctensor, weight.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLerpTensorOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Less(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLess(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Less_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLess_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LessEqual(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessEqual(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LessEqual_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessEqual_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LessEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LessEqualTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessEqualTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LessEqualTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessEqualTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LessEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LessScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LessTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LessTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LessTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLessTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Lgamma(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLgamma(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Lgamma_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLgamma_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LgammaOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgCholesky(upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgLinalgCholesky(ptr, ts.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgCholeskyEx(upper bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	ccheckErrors := int32(0)
-	if checkErrors {
-		ccheckErrors = int32(1)
-	}
-	lib.AtgLinalgCholeskyEx(ctensorPtr0, ts.ctensor, cupper, ccheckErrors)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	ccheckErrors := int32(0)
-	if checkErrors {
-		ccheckErrors = int32(1)
-	}
-	lib.AtgLinalgCholeskyExL(ctensorPtr0, l.ctensor, info.ctensor, ts.ctensor, cupper, ccheckErrors)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgCholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cupper := int32(0)
-	if upper {
-		cupper = int32(1)
-	}
-	lib.AtgLinalgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgCond(p *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgCond(ptr, ts.ctensor, p.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgCondOut(out *Tensor, p *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgCondOut(ptr, out.ctensor, ts.ctensor, p.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgCondPStr(p string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgCondPStr(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgCondPStrOut(out *Tensor, p string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgCondPStrOut(ptr, out.ctensor, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgDet(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgDet(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgDetOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgDetOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgEig(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgEig(ctensorPtr0, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgEigOut(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgEigh(uPLO string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgEigh(ctensorPtr0, ts.ctensor, uPLO)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgEighEigvals(ctensorPtr0, eigvals.ctensor, eigvecs.ctensor, ts.ctensor, uPLO)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgEigvals(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgEigvals(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgEigvalsOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgEigvalsOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgEigvalsh(uPLO string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgEigvalsh(ptr, ts.ctensor, uPLO)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgEigvalshOut(out *Tensor, uPLO string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgEigvalshOut(ptr, out.ctensor, ts.ctensor, uPLO)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgHouseholderProduct(input *Tensor, tau *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgHouseholderProduct(ptr, input.ctensor, tau.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgHouseholderProductOut(ptr, out.ctensor, input.ctensor, tau.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgInv(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgInv(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgInvEx(checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ccheckErrors := int32(0)
-	if checkErrors {
-		ccheckErrors = int32(1)
-	}
-	lib.AtgLinalgInvEx(ctensorPtr0, ts.ctensor, ccheckErrors)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgInvExInverse(inverse *Tensor, info *Tensor, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ccheckErrors := int32(0)
-	if checkErrors {
-		ccheckErrors = int32(1)
-	}
-	lib.AtgLinalgInvExInverse(ctensorPtr0, inverse.ctensor, info.ctensor, ts.ctensor, ccheckErrors)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgInvOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgInvOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgLstsq(b *Tensor, rcond []float64, driver string, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
-
-	var crcondVal float64 = 0.0
-	var crcondNull int = 1
-	if len(rcond) > 0 {
-		crcondVal = rcond[0]
-		crcondNull = 0
-	}
-	lib.AtgLinalgLstsq(ctensorPtr0, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-
-	return retVal0, retVal1, retVal2, retVal3, err
-}
-
-func (ts *Tensor) LinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
-
-	var crcondVal float64 = 0.0
-	var crcondNull int = 1
-	if len(rcond) > 0 {
-		crcondVal = rcond[0]
-		crcondNull = 0
-	}
-	lib.AtgLinalgLstsqOut(ctensorPtr0, solution.ctensor, residuals.ctensor, rank.ctensor, singularValues.ctensor, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-
-	return retVal0, retVal1, retVal2, retVal3, err
-}
-
-func (ts *Tensor) LinalgMatmul(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgMatmul(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgMatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgMatrixPower(n int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgMatrixPower(ptr, ts.ctensor, n)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgMatrixPowerOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgMatrixRank(tol []float64, hermitian bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctolVal float64 = 0.0
-	var ctolNull int = 1
-	if len(tol) > 0 {
-		ctolVal = tol[0]
-		ctolNull = 0
-	}
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgMatrixRank(ptr, ts.ctensor, ctolVal, ctolNull, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgMatrixRankOut(out *Tensor, tol []float64, hermitian bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctolVal float64 = 0.0
-	var ctolNull int = 1
-	if len(tol) > 0 {
-		ctolVal = tol[0]
-		ctolNull = 0
-	}
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgMatrixRankOut(ptr, out.ctensor, ts.ctensor, ctolVal, ctolNull, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgMatrixRankOutTolTensor(ptr, out.ctensor, input.ctensor, tol.ctensor, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgMatrixRankTolTensor(ptr, input.ctensor, tol.ctensor, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgMultiDot(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgLinalgMultiDot(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgMultiDotOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgLinalgMultiDotOut(ptr, out.ctensor, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgLinalgNorm(ptr, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgLinalgNormOrdStr(ptr, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgLinalgNormOrdStrOut(ptr, out.ctensor, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgLinalgNormOut(ptr, out.ctensor, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgPinv(rcond float64, hermitian bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgPinv(ptr, ts.ctensor, rcond, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgPinvOut(ptr, out.ctensor, ts.ctensor, rcond, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgPinvOutRcondTensor(ptr, out.ctensor, ts.ctensor, rcond.ctensor, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	chermitian := int32(0)
-	if hermitian {
-		chermitian = int32(1)
-	}
-	lib.AtgLinalgPinvRcondTensor(ptr, ts.ctensor, rcond.ctensor, chermitian)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgQr(mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgQr(ctensorPtr0, ts.ctensor, mode)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgQrOut(q *Tensor, r *Tensor, mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgQrOut(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, mode)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgSlogdet(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgSlogdet(ctensorPtr0, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) LinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgLinalgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func LinalgSolve(input *Tensor, other *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgSolve(ptr, input.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgSolveOut(out *Tensor, input *Tensor, other *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgSolveOut(ptr, out.ctensor, input.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgSvd(fullMatrices bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cfullMatrices := int32(0)
-	if fullMatrices {
-		cfullMatrices = int32(1)
-	}
-	lib.AtgLinalgSvd(ctensorPtr0, ts.ctensor, cfullMatrices)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func (ts *Tensor) LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, fullMatrices bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cfullMatrices := int32(0)
-	if fullMatrices {
-		cfullMatrices = int32(1)
-	}
-	lib.AtgLinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, ts.ctensor, cfullMatrices)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func LinalgSvdvals(input *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgSvdvals(ptr, input.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinalgSvdvalsOut(out *Tensor, input *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgSvdvalsOut(ptr, out.ctensor, input.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgTensorinv(ind int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgTensorinv(ptr, ts.ctensor, ind)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgTensorinvOut(out *Tensor, ind int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgTensorinvOut(ptr, out.ctensor, ts.ctensor, ind)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgTensorsolve(other *Tensor, dims []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgTensorsolve(ptr, ts.ctensor, other.ctensor, dims, len(dims))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinalgTensorsolveOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dims, len(dims))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Linear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLinearOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Linspace(start *Scalar, end *Scalar, steps []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cstepsVal int64 = 0
-	var cstepsNull int = 1
-	if len(steps) > 0 {
-		cstepsVal = steps[0]
-		cstepsNull = 0
-	}
-	lib.AtgLinspace(ptr, start.cscalar, end.cscalar, cstepsVal, cstepsNull, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cstepsVal int64 = 0
-	var cstepsNull int = 1
-	if len(steps) > 0 {
-		cstepsVal = steps[0]
-		cstepsNull = 0
-	}
-	lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, cstepsVal, cstepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log10(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog10(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log10_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog10_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Log10Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log1p(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog1p(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log1p_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog1p_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Log1pOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log2(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog2(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log2_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog2_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Log2Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Log_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLog_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogNormal_(mean float64, std float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogNormal_(ptr, ts.ctensor, mean, std)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogSigmoid(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogSigmoid(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogSigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logaddexp(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogaddexp(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logaddexp2(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogaddexp2(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logaddexp2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogaddexp2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogaddexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogaddexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logcumsumexp(dim int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogcumsumexp(ptr, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logdet(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogdet(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalAnd(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalAnd_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogicalAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalNot(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalNot(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalNot_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalNot_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogicalNotOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalOr(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalOr_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogicalOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalXor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogicalXor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logit(eps []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgLogit(ptr, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logit_(eps []float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgLogit_(ptr, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) LogitBackward(gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgLogitBackward(ptr, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgLogitBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Logspace(start *Scalar, end *Scalar, steps []int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cstepsVal int64 = 0
-	var cstepsNull int = 1
-	if len(steps) > 0 {
-		cstepsVal = steps[0]
-		cstepsNull = 0
-	}
-	lib.AtgLogspace(ptr, start.cscalar, end.cscalar, cstepsVal, cstepsNull, base, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64, base float64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cstepsVal int64 = 0
-	var cstepsNull int = 1
-	if len(steps) > 0 {
-		cstepsVal = steps[0]
-		cstepsNull = 0
-	}
-	lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, cstepsVal, cstepsNull, base)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Logsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) LogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Lstm(input *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	var chx []lib.Ctensor
-	for _, t := range hx {
-		chx = append(chx, t.ctensor)
-	}
-	var cparams []lib.Ctensor
-	for _, t := range params {
-		cparams = append(cparams, t.ctensor)
-	}
-	chasBiases := int32(0)
-	if hasBiases {
-		chasBiases = int32(1)
-	}
-	ctrain := int32(0)
-	if train {
-		ctrain = int32(1)
-	}
-	cbidirectional := int32(0)
-	if bidirectional {
-		cbidirectional = int32(1)
-	}
-	cbatchFirst := int32(0)
-	if batchFirst {
-		cbatchFirst = int32(1)
-	}
-	lib.AtgLstm(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func LstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	var chx []lib.Ctensor
-	for _, t := range hx {
-		chx = append(chx, t.ctensor)
-	}
-	lib.AtgLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func LstmData(data *Tensor, batchSizes *Tensor,
hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - var chx []lib.Ctensor - for _, t := range hx { - chx = append(chx, t.ctensor) - } - var cparams []lib.Ctensor - for _, t := range params { - cparams = append(cparams, t.ctensor) - } - chasBiases := int32(0) - if hasBiases { - chasBiases = int32(1) - } - ctrain := int32(0) - if train { - ctrain = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - lib.AtgLstmData(ctensorPtr0, data.ctensor, batchSizes.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) Lstsq(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgLstsq(ctensorPtr0, ts.ctensor, a.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) LstsqX(x *Tensor, qr *Tensor, a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgLstsqX(ctensorPtr0, x.ctensor, qr.ctensor, ts.ctensor, a.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Lt(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLt(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Lt_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLt_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) LtScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) LtTensor(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLtTensor(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) LtTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLtTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) LtTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) LuSolve(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) LuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func LuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - cunpackData := int32(0) - if unpackData { - cunpackData = int32(1) - } - cunpackPivots := int32(0) - if unpackPivots { - cunpackPivots = int32(1) - } - lib.AtgLuUnpack(ctensorPtr0, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func LuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - cunpackData := int32(0) - if unpackData { - cunpackData = int32(1) - } - cunpackPivots := int32(0) - if unpackPivots { - cunpackPivots = int32(1) - } - lib.AtgLuUnpackOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: 
*ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func MarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaskedFill(mask *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaskedFill_(mask *Tensor, value *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) MaskedFillTensor(mask *Tensor, value *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFillTensor(ptr, ts.ctensor, mask.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaskedFillTensor_(mask *Tensor, value *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFillTensor_(ptr, ts.ctensor, mask.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) MaskedScatter(mask *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaskedScatter_(mask *Tensor, source *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) MaskedSelect(mask *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedSelectBackward(ptr, grad.ctensor, input.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } 
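// NOTE: in the Masked* family above, MaskedFill writes `value` wherever
// `mask` is true, MaskedScatter copies elements of `source` at the true
// positions, and MaskedSelect gathers the selected elements into a new 1-D
// tensor. A hedged sketch (data illustrative; FloatScalar is assumed from
// this package):
//
//	x := ts.MustOfSlice([]float64{1, 2, 3, 4})
//	mask := x.MustLt(ts.FloatScalar(3.0), false) // Bool tensor {T, T, F, F}
//	sel := x.MustMaskedSelect(mask, false)       // 1-D tensor {1, 2}
//	mask.MustDrop(); sel.MustDrop(); x.MustDrop()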
- retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Matmul(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatmul(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatrixExp(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatrixExp(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatrixExpBackward(grad *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatrixExpBackward(ptr, ts.ctensor, grad.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatrixPower(n int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatrixPower(ptr, ts.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatrixPowerOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatrixRank(symmetric bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csymmetric := int32(0) - if symmetric { - csymmetric = int32(1) - } - lib.AtgMatrixRank(ptr, ts.ctensor, csymmetric) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MatrixRankTol(tol float64, symmetric bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csymmetric := int32(0) - if symmetric { - csymmetric = int32(1) - } - lib.AtgMatrixRankTol(ptr, ts.ctensor, tol, csymmetric) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Max(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMax(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - 
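// NOTE: Matmul above follows torch.matmul broadcasting (vector/matrix
// promotion), whereas Mm at the end of this hunk is strict 2-D matrix
// multiplication; the *Out variants write into a caller-supplied result
// tensor instead of allocating. Hedged sketch (MustView is assumed from the
// generated wrappers; shapes illustrative):
//
//	m := ts.MustOfSlice([]float64{1, 2, 3, 4, 5, 6}).MustView([]int64{2, 3}, true)
//	v := ts.MustOfSlice([]float64{1, 1, 1})
//	mv := m.MustMatmul(v, false) // shape [2]: row sums {6, 15}
//	mv.MustDrop(); v.MustDrop(); m.MustDrop()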
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMaxDimMax(ctensorPtr0, max.ctensor, maxValues.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxOther(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxOther(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool1dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - 
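// NOTE: reducers suffixed `Dim` (MaxDim above, MinDim and MedianDim further
// down) return a (values, indices) pair; the C side stores two consecutive
// Ctensor handles, and ctensorPtr1 is located from ctensorPtr0 by pointer
// arithmetic rather than by a second allocation. Hedged usage sketch:
//
//	x := ts.MustOfSlice([]float64{3, 1, 4, 1, 5, 9}).MustView([]int64{2, 3}, true)
//	vals, idxs := x.MustMaxDim(1, false, true) // per-row max {4, 9}, indices {2, 2}
//	vals.MustDrop()
//	idxs.MustDrop()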
cceilMode = int32(1) - } - lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool2dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool2dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool2dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool2dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, 
ceilMode bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool3dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool3dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgMaxPool3dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: 
*ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MaxUnpool2d(indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxUnpool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaxUnpool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts 
*Tensor) MaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Maximum(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaximum(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MaximumOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaximumOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Mean(dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMean(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMeanDim(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Median(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMedian(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MedianDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MedianDimValues(values *Tensor, indices 
*Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Min(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMin(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MinDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMinDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgMinDimMin(ctensorPtr0, min.ctensor, minIndices.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) MinOther(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMinOther(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Minimum(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMinimum(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MinimumOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMinimumOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - ctraining := int32(0) - if training { - ctraining = int32(1) - } - lib.AtgMiopenBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func MiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgMiopenBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) MiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenConvolutionBackwardBias(gradOutput *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMiopenConvolutionBackwardBias(ptr, gradOutput.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MiopenConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, 
len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenDepthwiseConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) MiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgMiopenDepthwiseConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenRnn(input *Tensor, weight []Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0))) - - var cweight []lib.Ctensor - for _, t := range weight { - cweight = append(cweight, t.ctensor) - } - 
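// NOTE: cgo has no bool type, so every flag in these wrappers (train,
// benchmark, deterministic, bidirectional, ...) is lowered to an int32 0/1
// before the Atg* call. The generator inlines the conversion each time; an
// equivalent helper (hypothetical, not part of this file) would be:
//
//	func cbool(b bool) int32 {
//		if b {
//			return 1
//		}
//		return 0
//	}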
-	cbatchFirst := int32(0)
-	if batchFirst {
-		cbatchFirst = int32(1)
-	}
-	ctrain := int32(0)
-	if train {
-		ctrain = int32(1)
-	}
-	cbidirectional := int32(0)
-	if bidirectional {
-		cbidirectional = int32(1)
-	}
-	lib.AtgMiopenRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, len(batchSizes), dropoutState.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, retVal3, retVal4, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-	retVal3 = &Tensor{ctensor: *ctensorPtr3}
-	retVal4 = &Tensor{ctensor: *ctensorPtr4}
-
-	return retVal0, retVal1, retVal2, retVal3, retVal4, err
-}
-
-func (ts *Tensor) Mish(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMish(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Mish_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMish_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) MishBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMishBackward(ptr, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MishOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMishOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnAdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func MkldnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cbiasDefined := int32(0)
-	if biasDefined {
-		cbiasDefined = int32(1)
-	}
-	lib.AtgMkldnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbiasDefined)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnConvolutionBackwardWeights(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cbiasDefined := int32(0)
-	if biasDefined {
-		cbiasDefined = int32(1)
-	}
-	lib.AtgMkldnnConvolutionBackwardWeights(ctensorPtr0, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbiasDefined)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) MkldnnLinear(weight *Tensor, bias *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnLinear(ptr, ts.ctensor, weight.ctensor, bias.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func MkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnLinearBackwardInput(ptr, inputSize, len(inputSize), gradOutput.ctensor, weight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func MkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cbiasDefined := int32(0)
-	if biasDefined {
-		cbiasDefined = int32(1)
-	}
-	lib.AtgMkldnnLinearBackwardWeights(ctensorPtr0, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func MkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	lib.AtgMkldnnMaxPool2dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func MkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cceilMode := int32(0)
-	if ceilMode {
-		cceilMode = int32(1)
-	}
-	lib.AtgMkldnnMaxPool3dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMkldnnReorderConv3dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Mm(mat2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMm(ptr, ts.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
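For orientation when reading these removed wrappers: every boolean parameter is marshalled to an int32 flag before crossing cgo, and functions that return several tensors have the C side write into consecutive pointer-sized slots. A minimal, self-contained sketch of both conventions follows; the helper name is illustrative only, not part of the generated API.

package main

import (
	"fmt"
	"unsafe"
)

// boolToCFlag mirrors the `cceilMode := int32(0); if ceilMode { cceilMode = int32(1) }`
// blocks in the generated wrappers: a Go bool becomes an int32 for the C ABI.
func boolToCFlag(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

func main() {
	fmt.Println(boolToCFlag(true), boolToCFlag(false)) // 1 0

	// Slot i of a multi-tensor result is slot 0 offset by i pointer sizes,
	// exactly how ctensorPtr1 is derived from ctensorPtr0 above.
	var slot0 unsafe.Pointer
	fmt.Println(unsafe.Sizeof(slot0)) // 8 on 64-bit targets
}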
-
-func (ts *Tensor) Mode(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgMode(ctensorPtr0, ts.ctensor, dim, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) ModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgModeValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Moveaxis(source []int64, destination []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMoveaxis(ptr, ts.ctensor, source, len(source), destination, len(destination))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MoveaxisInt(source int64, destination int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMoveaxisInt(ptr, ts.ctensor, source, destination)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Movedim(source []int64, destination []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMovedim(ptr, ts.ctensor, source, len(source), destination, len(destination))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MovedimInt(source int64, destination int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMovedimInt(ptr, ts.ctensor, source, destination)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMseLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MseLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Msort(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMsort(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MsortOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMsortOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Mul(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMul(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Mul_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMul_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) MulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MulScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMulScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MulScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMulScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) MultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultilabelMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultilabelMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Multinomial(numSamples int64, replacement bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	creplacement := int32(0)
-	if replacement {
-		creplacement = int32(1)
-	}
-	lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	creplacement := int32(0)
-	if replacement {
-		creplacement = int32(1)
-	}
-	lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Multiply(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiply(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Multiply_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiply_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) MultiplyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiplyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultiplyScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiplyScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MultiplyScalar_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMultiplyScalar_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Mv(vec *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMv(ptr, ts.ctensor, vec.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) MvOut(out *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Mvlgamma(p int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMvlgamma(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Mvlgamma_(p int64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMvlgamma_(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) MvlgammaOut(out *Tensor, p int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgMvlgammaOut(ptr, out.ctensor, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanToNum(nan []float64, posinf []float64, neginf []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cnanVal float64 = 0.0
-	var cnanNull int = 1
-	if len(nan) > 0 {
-		cnanVal = nan[0]
-		cnanNull = 0
-	}
-	var cposinfVal float64 = 0.0
-	var cposinfNull int = 1
-	if len(posinf) > 0 {
-		cposinfVal = posinf[0]
-		cposinfNull = 0
-	}
-	var cneginfVal float64 = 0.0
-	var cneginfNull int = 1
-	if len(neginf) > 0 {
-		cneginfVal = neginf[0]
-		cneginfNull = 0
-	}
-	lib.AtgNanToNum(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanToNum_(nan []float64, posinf []float64, neginf []float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cnanVal float64 = 0.0
-	var cnanNull int = 1
-	if len(nan) > 0 {
-		cnanVal = nan[0]
-		cnanNull = 0
-	}
-	var cposinfVal float64 = 0.0
-	var cposinfNull int = 1
-	if len(posinf) > 0 {
-		cposinfVal = posinf[0]
-		cposinfNull = 0
-	}
-	var cneginfVal float64 = 0.0
-	var cneginfNull int = 1
-	if len(neginf) > 0 {
-		cneginfVal = neginf[0]
-		cneginfNull = 0
-	}
-	lib.AtgNanToNum_(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cnanVal float64 = 0.0
-	var cnanNull int = 1
-	if len(nan) > 0 {
-		cnanVal = nan[0]
-		cnanNull = 0
-	}
-	var cposinfVal float64 = 0.0
-	var cposinfNull int = 1
-	if len(posinf) > 0 {
-		cposinfVal = posinf[0]
-		cposinfNull = 0
-	}
-	var cneginfVal float64 = 0.0
-	var cneginfNull int = 1
-	if len(neginf) > 0 {
-		cneginfVal = neginf[0]
-		cneginfNull = 0
-	}
-	lib.AtgNanToNumOut(ptr, out.ctensor, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Nanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanmean(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanmeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Nanmedian(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNanmedian(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanmedianDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanmedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) NanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanmedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Nanquantile(q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileNew(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileNew(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileNewOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileNewOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileNewScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileNewScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileNewScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileNewScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileScalar(q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cdimVal int64 = 0
-	var cdimNull int = 1
-	if len(dim) > 0 {
-		cdimVal = dim[0]
-		cdimNull = 0
-	}
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNanquantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Nansum(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNansum(ptr, ts.ctensor, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NansumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNansumDimIntlist(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NansumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNansumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Narrow(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNarrow(ptr, ts.ctensor, dim, start, length)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NarrowCopy(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNarrowCopyOut(ptr, out.ctensor, ts.ctensor, dim, start, length)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NarrowTensor(dim int64, start *Tensor, length int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNarrowTensor(ptr, ts.ctensor, dim, start.ctensor, length)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func NativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgNativeBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func NativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgNativeBatchNormOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func NativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgNativeGroupNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func NativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgNativeLayerNorm(ctensorPtr0, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, retVal2, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
-	return retVal0, retVal1, retVal2, err
-}
-
-func (ts *Tensor) NativeNorm(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNativeNorm(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNativeNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ne(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNe(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ne_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNe_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NeTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNeTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NeTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNeTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Neg(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNeg(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Neg_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNeg_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NegOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNegOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Negative(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNegative(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Negative_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNegative_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NegativeOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNegativeOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNewEmpty(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNewEmptyStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNewFull(ptr, ts.ctensor, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNewOnes(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNewZeros(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Nextafter(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNextafter(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Nextafter_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNextafter_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NextafterOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNextafterOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLoss2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLossNd(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Nonzero(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNonzero(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NonzeroOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Norm(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNorm(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNormDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func NormExceptDim(v *Tensor, pow int64, dim int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNormScalaroptDim(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormScalaroptDtype(ptr, ts.ctensor, p.cscalar, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Normal(out *Tensor, mean *Tensor, std float64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormal(ptr, out.ctensor, mean.ctensor, std)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Normal_(mean float64, std float64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormal_(ptr, ts.ctensor, mean, std)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func NormalFloatFloatOut(out *Tensor, mean float64, std float64, size []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormalFloatFloatOut(ptr, out.ctensor, mean, std, size, len(size))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func NormalFloatTensorOut(out *Tensor, mean float64, std *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormalFloatTensorOut(ptr, out.ctensor, mean, std.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func NormalTensorTensorOut(out *Tensor, mean *Tensor, std *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNormalTensorTensorOut(ptr, out.ctensor, mean.ctensor, std.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NotEqual(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNotEqual(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NotEqual_(other *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNotEqual_(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NotEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNotEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NotEqualTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNotEqualTensor(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NotEqualTensor_(other *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNotEqualTensor_(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) NotEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNotEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NuclearNorm(keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NuclearNormDim(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNuclearNormDim(ptr, ts.ctensor, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNuclearNormDimOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NuclearNormOut(out *Tensor, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) NumpyT(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgNumpyT(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) OneHot(numClasses int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOneHot(ptr, ts.ctensor, numClasses)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOnes(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) OnesLike(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOnesLike(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func OnesOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOnesOut(ptr, out.ctensor, size, len(size))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Orgqr(input2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) OrgqrOut(out *Tensor, input2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Ormqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cleft := int32(0)
-	if left {
-		cleft = int32(1)
-	}
-	ctranspose := int32(0)
-	if transpose {
-		ctranspose = int32(1)
-	}
-	lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) OrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cleft := int32(0)
-	if left {
-		cleft = int32(1)
-	}
-	ctranspose := int32(0)
-	if transpose {
-		ctranspose = int32(1)
-	}
-	lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Outer(vec2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOuter(ptr, ts.ctensor, vec2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) OuterOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgOuterOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) OutputNr(del bool) (retVal int64, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgOutputNr(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func PadSequence(sequences []Tensor, batchFirst bool, paddingValue float64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var csequences []lib.Ctensor
-	for _, t := range sequences {
-		csequences = append(csequences, t.ctensor)
-	}
-	cbatchFirst := int32(0)
-	if batchFirst {
-		cbatchFirst = int32(1)
-	}
-	lib.AtgPadSequence(ptr, csequences, len(csequences), cbatchFirst, paddingValue)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Pdist(p float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPdist(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Permute(dims []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPermute(ptr, ts.ctensor, dims, len(dims))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) PinMemory(device gotch.Device, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPinMemory(ptr, ts.ctensor, device.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Pinverse(rcond float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPinverse(ptr, ts.ctensor, rcond)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) PixelShuffle(upscaleFactor int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) PixelUnshuffle(downscaleFactor int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPixelUnshuffle(ptr, ts.ctensor, downscaleFactor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Poisson(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPoisson(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func PoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	clogInput := int32(0)
-	if logInput {
-		clogInput = int32(1)
-	}
-	cfull := int32(0)
-	if full {
-		cfull = int32(1)
-	}
-	lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func Polar(abs *Tensor, angle *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPolar(ptr, abs.ctensor, angle.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func PolarOut(out *Tensor, abs *Tensor, angle *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPolarOut(ptr, out.ctensor, abs.ctensor, angle.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Polygamma(n int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPolygamma(ptr, n, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Polygamma_(n int64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPolygamma_(ptr, ts.ctensor, n)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) PolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Positive(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPositive(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Pow(exponent *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPow(ptr, ts.ctensor, exponent.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Pow_(exponent *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func PowScalar(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPowScalar(ptr, selfScalar.cscalar, exponent.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func PowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPowScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) PowTensor_(exponent *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgPowTensor_(ptr, ts.ctensor, exponent.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
- ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) PowTensorScalar(exponent *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPowTensorScalar(ptr, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) PowTensorScalarOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPowTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) PowTensorTensorOut(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPowTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Prelu(weight *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) PreluBackward(gradOutput *Tensor, weight *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - lib.AtgPreluBackward(ctensorPtr0, gradOutput.ctensor, ts.ctensor, weight.ctensor) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Prod(dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgProd(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgProdDimInt(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgProdIntOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Put(index *Tensor, source *Tensor, accumulate bool, del bool) (retVal 
*Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - caccumulate := int32(0) - if accumulate { - caccumulate = int32(1) - } - lib.AtgPut(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Put_(index *Tensor, source *Tensor, accumulate bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - caccumulate := int32(0) - if accumulate { - caccumulate = int32(1) - } - lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) QPerChannelAxis(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.AtgQPerChannelAxis(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) QPerChannelScales(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQPerChannelScales(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QPerChannelZeroPoints(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QScale(del bool) (retVal float64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.AtgQScale(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) QZeroPoint(del bool) (retVal int64, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.AtgQZeroPoint(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func (ts *Tensor) Qr(some bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - csome := int32(0) - if some { - csome = int32(1) - } - lib.AtgQr(ctensorPtr0, ts.ctensor, csome) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) QrQ(q *Tensor, r *Tensor, some bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - csome := int32(0) - if some { - csome = int32(1) - } - lib.AtgQrQ(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, csome) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Quantile(q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileNew(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileNew(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileNewOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileNewOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileNewScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileNewScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileNewScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileNewScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileScalar(q 
float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgQuantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizePerTensorTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, 
zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var chx []lib.Ctensor - for _, t := range hx { - chx = append(chx, t.ctensor) - } - lib.AtgQuantizedLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) QuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgQuantizedMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { - cceilMode = int32(1) - } - lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, 
hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Rad2deg(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRad2deg(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Rad2deg_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRad2deg_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Rad2degOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRad2degOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRand(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RandLike(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandOut(out *Tensor, size []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandint(ptr, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RandintLike(high int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintLike(ptr, ts.ctensor, high) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RandintLikeLowDtype(low int64, high int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintLikeLowDtype(ptr, ts.ctensor, low, high) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintLow(ptr, low, high, size, 
len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandintLowOut(out *Tensor, low int64, high int64, size []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintLowOut(ptr, out.ctensor, low, high, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandintOut(out *Tensor, high int64, size []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintOut(ptr, out.ctensor, high, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandn(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RandnLike(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandnLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandnOut(out *Tensor, size []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandnOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Random_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandom_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) RandomFrom_(from int64, to []int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctoVal int64 = 0 - var ctoNull int = 1 - if len(to) > 0 { - ctoVal = to[0] - ctoNull = 0 - } - lib.AtgRandomFrom_(ptr, ts.ctensor, from, ctoVal, ctoNull) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) RandomTo_(to int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandomTo_(ptr, ts.ctensor, to) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandpermOut(out *Tensor, n int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandpermOut(ptr, out.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Range(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRange(ptr, start.cscalar, end.cscalar, 
optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RangeOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRangeStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Ravel(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRavel(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Real(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReal(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Reciprocal(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReciprocal(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Reciprocal_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReciprocal_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ReciprocalOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad1d(padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, 
ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad2d(padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad3d(padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad3d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) 
ReflectionPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Relu(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRelu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Relu6(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRelu6(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Relu6_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRelu6_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Relu_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRelu_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Remainder(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainder(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Remainder_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) RemainderScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainderScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RemainderScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainderScalarTensor(ptr, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RemainderTensor(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainderTensor(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RemainderTensor_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainderTensor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) RemainderTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer 
ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRemainderTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Renorm(p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Renorm_(p *Scalar, dim int64, maxnorm *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) RenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Repeat(repeats []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRepeat(ptr, ts.ctensor, repeats, len(repeats)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RepeatInterleave(repeats *Tensor, outputSize []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var coutputSizeVal int64 = 0 - var coutputSizeNull int = 1 - if len(outputSize) > 0 { - coutputSizeVal = outputSize[0] - coutputSizeNull = 0 - } - lib.AtgRepeatInterleave(ptr, repeats.ctensor, coutputSizeVal, coutputSizeNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - var coutputSizeVal int64 = 0 - var coutputSizeNull int = 1 - if len(outputSize) > 0 { - coutputSizeVal = outputSize[0] - coutputSizeNull = 0 - } - lib.AtgRepeatInterleaveSelfInt(ptr, ts.ctensor, repeats, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - var coutputSizeVal int64 = 0 - var coutputSizeNull int = 1 - if len(outputSize) > 0 { - coutputSizeVal = outputSize[0] - coutputSizeNull = 0 - } - lib.AtgRepeatInterleaveSelfTensor(ptr, ts.ctensor, repeats.ctensor, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull) - if err = TorchErr(); 
err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad1d(padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad2d(padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad3d(padding []int64, del bool) 
(retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReplicationPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RequiresGrad_(requiresGrad bool) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - crequiresGrad := int32(0) - if requiresGrad { - crequiresGrad = int32(1) - } - lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Reshape(shape []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReshape(ptr, ts.ctensor, shape, len(shape)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ReshapeAs(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Resize_(size []int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResize_(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ResizeAs_(theTemplate *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ResizeAsSparse_(theTemplate *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResizeAsSparse_(ptr, ts.ctensor, theTemplate.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) ResolveConj(del bool) (retVal *Tensor, err error) 
{ - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResolveConj(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ResolveNeg(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResolveNeg(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) RetainsGrad(del bool) (retVal bool, err error) { - if del { - defer ts.MustDrop() - } - - retVal = lib.AtgRetainsGrad(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - return retVal, err -} - -func RnnRelu(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var cparams []lib.Ctensor - for _, t := range params { - cparams = append(cparams, t.ctensor) - } - chasBiases := int32(0) - if hasBiases { - chasBiases = int32(1) - } - ctrain := int32(0) - if train { - ctrain = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - lib.AtgRnnRelu(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func RnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var cparams []lib.Ctensor - for _, t := range params { - cparams = append(cparams, t.ctensor) - } - chasBiases := int32(0) - if hasBiases { - chasBiases = int32(1) - } - ctrain := int32(0) - if train { - ctrain = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - lib.AtgRnnReluData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func RnnTanh(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 
*Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var cparams []lib.Ctensor - for _, t := range params { - cparams = append(cparams, t.ctensor) - } - chasBiases := int32(0) - if hasBiases { - chasBiases = int32(1) - } - ctrain := int32(0) - if train { - ctrain = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - cbatchFirst := int32(0) - if batchFirst { - cbatchFirst = int32(1) - } - lib.AtgRnnTanh(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func RnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func RnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var cparams []lib.Ctensor - for _, t := range params { - cparams = append(cparams, t.ctensor) - } - chasBiases := int32(0) - if hasBiases { - chasBiases = int32(1) - } - ctrain := int32(0) - if train { - ctrain = int32(1) - } - cbidirectional := int32(0) - if bidirectional { - cbidirectional = int32(1) - } - lib.AtgRnnTanhData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Roll(shifts []int64, dims []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRoll(ptr, ts.ctensor, shifts, len(shifts), dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Rot90(k int64, dims []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRot90(ptr, ts.ctensor, k, dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Round(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRound(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Round_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
-	lib.AtgRound_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) RoundOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func RowStack(tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgRowStack(ptr, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func RowStackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var ctensors []lib.Ctensor
-	for _, t := range tensors {
-		ctensors = append(ctensors, t.ctensor)
-	}
-	lib.AtgRowStackOut(ptr, out.ctensor, ctensors, len(ctensors))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Rrelu(training bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgRrelu(ptr, ts.ctensor, ctraining)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Rrelu_(training bool) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgRrelu_(ptr, ts.ctensor, ctraining)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) RreluWithNoise(noise *Tensor, training bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) RreluWithNoise_(noise *Tensor, training bool) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) RreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	cselfIsResult := int32(0)
-	if selfIsResult {
-		cselfIsResult = int32(1)
-	}
-	lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) RreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ctraining := int32(0)
-	if training {
-		ctraining = int32(1)
-	}
-	lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Rsqrt(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgRsqrt(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Rsqrt_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgRsqrt_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) RsqrtOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Rsub(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgRsub(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) RsubScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgRsubScalar(ptr, ts.ctensor, other.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func ScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Scatter(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Scatter_(dim int64, index *Tensor, src *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterReduce(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterReduce_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterSrcOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterValue(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterValue(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterValue_(dim int64, index *Tensor, value *Scalar) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterValue_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterValueOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterValueReduce(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) ScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterValueReduce_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) ScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgScatterValueReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Searchsorted(sortedSequence *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	coutInt32 := int32(0)
-	if outInt32 {
-		coutInt32 = int32(1)
-	}
-	cright := int32(0)
-	if right {
-		cright = int32(1)
-	}
-	lib.AtgSearchsorted(ptr, sortedSequence.ctensor, ts.ctensor, coutInt32, cright)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	coutInt32 := int32(0)
-	if outInt32 {
-		coutInt32 = int32(1)
-	}
-	cright := int32(0)
-	if right {
-		cright = int32(1)
-	}
-	lib.AtgSearchsortedScalar(ptr, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	coutInt32 := int32(0)
-	if outInt32 {
-		coutInt32 = int32(1)
-	}
-	cright := int32(0)
-	if right {
-		cright = int32(1)
-	}
-	lib.AtgSearchsortedTensorOut(ptr, out.ctensor, sortedSequence.ctensor, ts.ctensor, coutInt32, cright)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, axis int64, unsafety bool, initial *Scalar) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cunsafety := int32(0)
-	if unsafety {
-		cunsafety = int32(1)
-	}
-	lib.AtgSegmentReduce(ptr, data.ctensor, reduce, lengths.ctensor, indices.ctensor, axis, cunsafety, initial.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Select(dim int64, index int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSelect(ptr, ts.ctensor, dim, index)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSelectBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), dim, index)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Selu(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSelu(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Selu_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSelu_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Set_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSet_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SetRequiresGrad(r bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	cr := int32(0)
-	if r {
-		cr = int32(1)
-	}
-	lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SetSourceTensor_(source *Tensor) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSetSourceTensor_(ptr, ts.ctensor, source.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) Sgn(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSgn(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sgn_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSgn_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SgnOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSgnOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sigmoid(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSigmoid(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sigmoid_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSigmoid_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func SigmoidBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sign(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSign(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sign_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSign_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SignOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSignOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Signbit(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSignbit(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SignbitOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSignbitOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Silu(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSilu(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Silu_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSilu_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SiluBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSiluBackward(ptr, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSiluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SiluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSiluOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sin(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSin(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sin_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSin_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SinOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSinOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sinc(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSinc(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sinc_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSinc_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SincOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSincOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sinh(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSinh(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Sinh_() (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSinh_(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SinhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Slice(dim int64, start []int64, end []int64, step int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cstartVal int64 = 0
-	var cstartNull int = 1
-	if len(start) > 0 {
-		cstartVal = start[0]
-		cstartNull = 0
-	}
-	var cendVal int64 = 0
-	var cendNull int = 1
-	if len(end) > 0 {
-		cendVal = end[0]
-		cendNull = 0
-	}
-	lib.AtgSlice(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSliceBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), dim, start, end, step)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Slogdet(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgSlogdet(ctensorPtr0, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) SlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Smm(mat2 *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction, beta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSmoothL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, beta)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Softmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Softplus(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftplus(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftplusBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftplusOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Softshrink(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftshrink(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SoftshrinkOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) Solve(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgSolve(ctensorPtr0, ts.ctensor, a.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) SolveSolution(solution *Tensor, lu *Tensor, a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	lib.AtgSolveSolution(ctensorPtr0, solution.ctensor, lu.ctensor, ts.ctensor, a.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) Sort(dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cdescending := int32(0)
-	if descending {
-		cdescending = int32(1)
-	}
-	lib.AtgSort(ctensorPtr0, ts.ctensor, dim, cdescending)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) SortStable(stable bool, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cstable := int32(0)
-	if stable {
-		cstable = int32(1)
-	}
-	cdescending := int32(0)
-	if descending {
-		cdescending = int32(1)
-	}
-	lib.AtgSortStable(ctensorPtr0, ts.ctensor, cstable, dim, cdescending)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) SortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cdescending := int32(0)
-	if descending {
-		cdescending = int32(1)
-	}
-	lib.AtgSortValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, cdescending)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func (ts *Tensor) SortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
-	cstable := int32(0)
-	if stable {
-		cstable = int32(1)
-	}
-	cdescending := int32(0)
-	if descending {
-		cdescending = int32(1)
-	}
-	lib.AtgSortValuesStable(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, cstable, dim, cdescending)
-	if err = TorchErr(); err != nil {
-		return retVal0, retVal1, err
-	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
-	return retVal0, retVal1, err
-}
-
-func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseCooTensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseCooTensorIndices(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseCooTensorIndicesSize(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseCsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func SparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseCsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SparseDim(del bool) (retVal int64, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-
-	retVal = lib.AtgSparseDim(ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	return retVal, err
-}
-
-func (ts *Tensor) SparseMask(mask *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseResize_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64) (err error) {
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim)
-	if err = TorchErr(); err != nil {
-		return err
-	}
-	ts.ctensor = *ptr
-
-	return err
-}
-
-func (ts *Tensor) SpecialDigamma(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialDigamma(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialDigammaOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialDigammaOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialEntr(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialEntr(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialEntrOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialEntrOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErf(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErf(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfc(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfc(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfcOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfcOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfcx(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfcx(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfcxOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfcxOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfinv(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfinv(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialErfinvOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialErfinvOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialExp2(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialExp2(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialExp2Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialExp2Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialExpit(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialExpit(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialExpitOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialExpitOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialExpm1(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialExpm1(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialExpm1Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialExpm1Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialGammainc(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialGammainc(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialGammaincOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialGammaincOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialGammaincc(other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialGammaincc(ptr, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialGammainccOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialGammainccOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialGammaln(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialGammaln(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialGammalnOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialGammalnOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI0(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI0(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI0Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI0Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI0e(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI0e(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI0eOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI0eOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI1(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI1(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI1Out(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI1Out(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI1e(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI1e(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialI1eOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialI1eOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLog1p(del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialLog1p(ptr, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLog1pOut(out *Tensor, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialLog1pOut(ptr, out.ctensor, ts.ctensor)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLogit(eps []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgSpecialLogit(ptr, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	var cepsVal float64 = 0.0
-	var cepsNull int = 1
-	if len(eps) > 0 {
-		cepsVal = eps[0]
-		cepsNull = 0
-	}
-	lib.AtgSpecialLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLogsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgSpecialLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	ckeepdim := int32(0)
-	if keepdim {
-		ckeepdim = int32(1)
-	}
-	lib.AtgSpecialLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
-func (ts *Tensor) SpecialMultigammaln(p int64, del bool) (retVal *Tensor, err error) {
-	if del {
-		defer ts.MustDrop()
-	}
-	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
-	lib.AtgSpecialMultigammaln(ptr, ts.ctensor, p)
-	if err = TorchErr(); err != nil {
-		return retVal, err
-	}
-	retVal = &Tensor{ctensor: *ptr}
-
-	return retVal, err
-}
-
int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialMultigammalnOut(ptr, out.ctensor, ts.ctensor, p) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialNdtr(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialNdtr(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialNdtrOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialNdtrOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialNdtri(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialNdtri(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialNdtriOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialNdtriOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialPolygamma(n int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialPolygamma(ptr, n, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialPolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialPolygammaOut(ptr, out.ctensor, n, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialPsi(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialPsi(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialPsiOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialPsiOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialRound(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialRound(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialRoundOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialRoundOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialSinc(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialSinc(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialSincOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialSincOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlog1py(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlog1py(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlog1pyOtherScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlog1pyOtherScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlog1pyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlog1pyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlog1pyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func SpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlog1pySelfScalar(ptr, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func SpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlog1pySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlogy(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlogy(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) 
SpecialXlogyOtherScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlogyOtherScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlogyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialXlogyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlogyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func SpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlogySelfScalar(ptr, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func SpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialXlogySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialZeta(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialZeta(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialZetaOtherScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialZetaOtherScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialZetaOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) SpecialZetaOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSpecialZetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func SpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
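[Reviewer note, not part of the generated diff: a convention worth knowing when reading these wrappers is that Libtorch optional scalars such as `eps` in `SpecialLogit` surface in Go as slices, where a nil/empty slice means "use the default (None)" and a one-element slice supplies a value; the generated `c...Val`/`c...Null` pair carries that choice across cgo. A minimal usage sketch under the assumption that gotch's generated helpers `ts.MustRand` and `Tensor.Print` behave as elsewhere in the library:

package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustRand([]int64{4}, gotch.Float, gotch.CPU)
	defer x.MustDrop()

	// nil slice: eps is None on the C++ side
	y1, err := x.SpecialLogit(nil, false)
	if err != nil {
		panic(err)
	}
	defer y1.MustDrop()

	// one-element slice: eps = 1e-6
	y2, err := x.SpecialLogit([]float64{1e-6}, false)
	if err != nil {
		panic(err)
	}
	defer y2.MustDrop()

	y1.Print()
	y2.Print()
}
]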
-
-func SpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSpecialZetaSelfScalar(ptr, selfScalar.cscalar, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func SpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSpecialZetaSelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Sqrt(del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqrt(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Sqrt_() (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqrt_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) SqrtOut(out *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Square(del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSquare(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Square_() (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSquare_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) SquareOut(out *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSquareOut(ptr, out.ctensor, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Squeeze(del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqueeze(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Squeeze_() (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqueeze_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) SqueezeDim(dim int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqueezeDim(ptr, ts.ctensor, dim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SqueezeDim_(dim int64) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSqueezeDim_(ptr, ts.ctensor, dim)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) Sspaddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func Stack(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var ctensors []lib.Ctensor
- for _, t := range tensors {
- ctensors = append(ctensors, t.ctensor)
- }
- lib.AtgStack(ptr, ctensors, len(ctensors), dim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func StackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var ctensors []lib.Ctensor
- for _, t := range tensors {
- ctensors = append(ctensors, t.ctensor)
- }
- lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Std(unbiased bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- cunbiased := int32(0)
- if unbiased {
- cunbiased = int32(1)
- }
- lib.AtgStd(ptr, ts.ctensor, cunbiased)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) StdCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var ccorrectionVal int64 = 0
- var ccorrectionNull int = 1
- if len(correction) > 0 {
- ccorrectionVal = correction[0]
- ccorrectionNull = 0
- }
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgStdCorrection(ptr, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) StdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var ccorrectionVal int64 = 0
- var ccorrectionNull int = 1
- if len(correction) > 0 {
- ccorrectionVal = correction[0]
- ccorrectionNull = 0
- }
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgStdCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) StdDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- cunbiased := int32(0)
- if unbiased {
- cunbiased = int32(1)
- }
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgStdDim(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) StdMean(unbiased bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
- cunbiased := int32(0)
- if unbiased {
- cunbiased = int32(1)
- }
- lib.AtgStdMean(ctensorPtr0, ts.ctensor, cunbiased)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
- return retVal0, retVal1, err
-}
-
-func (ts *Tensor) StdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
- var ccorrectionVal int64 = 0
- var ccorrectionNull int = 1
- if len(correction) > 0 {
- ccorrectionVal = correction[0]
- ccorrectionNull = 0
- }
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgStdMeanCorrection(ctensorPtr0, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
- return retVal0, retVal1, err
-}
-
-func (ts *Tensor) StdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
- cunbiased := int32(0)
- if unbiased {
- cunbiased = int32(1)
- }
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgStdMeanDim(ctensorPtr0, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
- return retVal0, retVal1, err
-}
-
-func (ts *Tensor) StdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- cunbiased := int32(0)
- if unbiased {
- cunbiased = int32(1)
- }
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Stft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var chopLengthVal int64 = 0
- var chopLengthNull int = 1
- if len(hopLength) > 0 {
- chopLengthVal = hopLength[0]
- chopLengthNull = 0
- }
- var cwinLengthVal int64 = 0
- var cwinLengthNull int = 1
- if len(winLength) > 0 {
- cwinLengthVal = winLength[0]
- cwinLengthNull = 0
- }
- cnormalized := int32(0)
- if normalized {
- cnormalized = int32(1)
- }
- conesided := int32(0)
- if onesided {
- conesided = int32(1)
- }
- creturnComplex := int32(0)
- if returnComplex {
- creturnComplex = int32(1)
- }
- lib.AtgStft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, cnormalized, conesided, creturnComplex)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
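[Reviewer note, not part of the generated diff: every generated method takes a trailing `del` flag; when it is true the receiver is dropped via `defer ts.MustDrop()` after the C call, which lets call chains free intermediate tensors without extra bookkeeping. A minimal sketch of that ownership pattern, assuming the generated `ts.MustOnes` helper:

package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustOnes([]int64{3, 3}, gotch.Float, gotch.CPU)

	// del=false keeps x alive for later use.
	sq, err := x.Sqrt(false)
	if err != nil {
		panic(err)
	}

	// del=true drops the intermediate sq as soon as Square returns.
	y, err := sq.Square(true)
	if err != nil {
		panic(err)
	}

	y.Print()
	x.MustDrop()
	y.MustDrop()
}
]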
-
-func (ts *Tensor) Sub(other *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSub(ptr, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Sub_(other *Tensor) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSub_(ptr, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SubScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubScalar(ptr, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SubScalar_(other *Scalar) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubScalar_(ptr, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) Subtract(other *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubtract(ptr, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Subtract_(other *Tensor) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubtract_(ptr, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) SubtractOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubtractOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SubtractScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubtractScalar(ptr, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SubtractScalar_(other *Scalar) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSubtractScalar_(ptr, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) Sum(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSum(ptr, ts.ctensor, dtype.CInt())
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgSumDimIntlist(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- ckeepdim := int32(0)
- if keepdim {
- ckeepdim = int32(1)
- }
- lib.AtgSumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) SumToSize(size []int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSumToSize(ptr, ts.ctensor, size, len(size))
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Svd(some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
- ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
- csome := int32(0)
- if some {
- csome = int32(1)
- }
- ccomputeUv := int32(0)
- if computeUv {
- ccomputeUv = int32(1)
- }
- lib.AtgSvd(ctensorPtr0, ts.ctensor, csome, ccomputeUv)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, retVal2, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
- return retVal0, retVal1, retVal2, err
-}
-
-func (ts *Tensor) SvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
- ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
-
- csome := int32(0)
- if some {
- csome = int32(1)
- }
- ccomputeUv := int32(0)
- if computeUv {
- ccomputeUv = int32(1)
- }
- lib.AtgSvdU(ctensorPtr0, u.ctensor, s.ctensor, v.ctensor, ts.ctensor, csome, ccomputeUv)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, retVal2, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
-
- return retVal0, retVal1, retVal2, err
-}
-
-func (ts *Tensor) Swapaxes(axis0 int64, axis1 int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSwapaxes(ptr, ts.ctensor, axis0, axis1)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Swapaxes_(axis0 int64, axis1 int64) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSwapaxes_(ptr, ts.ctensor, axis0, axis1)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) Swapdims(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSwapdims(ptr, ts.ctensor, dim0, dim1)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Swapdims_(dim0 int64, dim1 int64) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgSwapdims_(ptr, ts.ctensor, dim0, dim1)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) Symeig(eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
- ceigenvectors := int32(0)
- if eigenvectors {
- ceigenvectors = int32(1)
- }
- cupper := int32(0)
- if upper {
- cupper = int32(1)
- }
- lib.AtgSymeig(ctensorPtr0, ts.ctensor, ceigenvectors, cupper)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
- return retVal0, retVal1, err
-}
-
-func (ts *Tensor) SymeigE(e *Tensor, v *Tensor, eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
- ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
-
- ceigenvectors := int32(0)
- if eigenvectors {
- ceigenvectors = int32(1)
- }
- cupper := int32(0)
- if upper {
- cupper = int32(1)
- }
- lib.AtgSymeigE(ctensorPtr0, e.ctensor, v.ctensor, ts.ctensor, ceigenvectors, cupper)
- if err = TorchErr(); err != nil {
- return retVal0, retVal1, err
- }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
-
- return retVal0, retVal1, err
-}
-
-func (ts *Tensor) T(del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgT(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) T_() (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgT_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) Take(index *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTake(ptr, ts.ctensor, index.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) TakeAlongDim(indices *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var cdimVal int64 = 0
- var cdimNull int = 1
- if len(dim) > 0 {
- cdimVal = dim[0]
- cdimNull = 0
- }
- lib.AtgTakeAlongDim(ptr, ts.ctensor, indices.ctensor, cdimVal, cdimNull)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) TakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- var cdimVal int64 = 0
- var cdimNull int = 1
- if len(dim) > 0 {
- cdimVal = dim[0]
- cdimNull = 0
- }
- lib.AtgTakeAlongDimOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, cdimVal, cdimNull)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Tan(del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTan(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Tan_() (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTan_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) TanOut(out *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTanOut(ptr, out.ctensor, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Tanh(del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTanh(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Tanh_() (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTanh_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func TanhBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func TanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) TanhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther))
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) TensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTensordotOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther))
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Threshold(threshold *Scalar, value *Scalar, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Threshold_(threshold *Scalar, value *Scalar) (err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar)
- if err = TorchErr(); err != nil {
- return err
- }
- ts.ctensor = *ptr
-
- return err
-}
-
-func (ts *Tensor) ThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgThresholdBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, threshold.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) Tile(dims []int64, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTile(ptr, ts.ctensor, dims, len(dims))
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) To(device gotch.Device, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgTo(ptr, ts.ctensor, device.CInt())
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ToDense(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgToDense(ptr, ts.ctensor, dtype.CInt())
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func ToDenseBackward(grad *Tensor, input *Tensor) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- cnonBlocking := int32(0)
- if nonBlocking {
- cnonBlocking = int32(1)
- }
- ccopy := int32(0)
- if copy {
- ccopy = int32(1)
- }
- lib.AtgToDevice(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- cnonBlocking := int32(0)
- if nonBlocking {
- cnonBlocking = int32(1)
- }
- ccopy := int32(0)
- if copy {
- ccopy = int32(1)
- }
- lib.AtgToDtype(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- cnonBlocking := int32(0)
- if nonBlocking {
- cnonBlocking = int32(1)
- }
- ccopy := int32(0)
- if copy {
- ccopy = int32(1)
- }
- lib.AtgToDtypeLayout(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func (ts *Tensor) ToMkldnn(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
- if del {
- defer ts.MustDrop()
- }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgToMkldnn(ptr, ts.ctensor, dtype.CInt())
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func ToMkldnnBackward(grad *Tensor, input *Tensor) (retVal *Tensor, err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = &Tensor{ctensor: *ptr}
-
- return retVal, err
-}
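[Reviewer note, not part of the generated diff: multi-output ops such as `Svd`, `Symeig`, and `StdMean` above reserve one allocation and derive the second and third result pointers by `unsafe.Sizeof` offsets; the C side writes consecutive ctensor handles starting at `ctensorPtr0`. A minimal usage sketch of that multi-return convention, assuming the generated `ts.MustRand` helper:

package main

import (
	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	x := ts.MustRand([]int64{5}, gotch.Float, gotch.CPU)

	// StdMean returns two tensors filled into consecutive
	// ctensor slots by the C side; del=true frees x here.
	std, mean, err := x.StdMean(true, true)
	if err != nil {
		panic(err)
	}

	std.Print()
	mean.Print()
	std.MustDrop()
	mean.MustDrop()
}
]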
ToOther(other *Tensor, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { - cnonBlocking = int32(1) - } - ccopy := int32(0) - if copy { - ccopy = int32(1) - } - lib.AtgToOther(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ToSparse(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToSparse(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ToSparseSparseDim(sparseDim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToSparseSparseDim(ptr, ts.ctensor, sparseDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Topk(k int64, dim int64, largest bool, sorted bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - clargest := int32(0) - if largest { - clargest = int32(1) - } - csorted := int32(0) - if sorted { - csorted = int32(1) - } - lib.AtgTopk(ctensorPtr0, ts.ctensor, k, dim, clargest, csorted) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) TopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - clargest := int32(0) - if largest { - clargest = int32(1) - } - csorted := int32(0) - if sorted { - csorted = int32(1) - } - lib.AtgTopkValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, clargest, csorted) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Totype(scalarType gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Trace(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrace(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func TraceBackward(grad *Tensor, sizes []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
lib.AtgTraceBackward(ptr, grad.ctensor, sizes, len(sizes)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Transpose_(dim0 int64, dim1 int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func Trapezoid(y *Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrapezoid(ptr, y.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func TrapezoidX(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrapezoidX(ptr, y.ctensor, x.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Trapz(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func TrapzDx(y *Tensor, dx float64, dim int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrapzDx(ptr, y.ctensor, dx, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cupper := int32(0) - if upper { - cupper = int32(1) - } - ctranspose := int32(0) - if transpose { - ctranspose = int32(1) - } - cunitriangular := int32(0) - if unitriangular { - cunitriangular = int32(1) - } - lib.AtgTriangularSolve(ctensorPtr0, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) TriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cupper := int32(0) - if upper { - cupper = int32(1) - } - ctranspose := int32(0) - if transpose { - ctranspose = int32(1) - } - cunitriangular := int32(0) - if unitriangular { - cunitriangular = int32(1) - } - lib.AtgTriangularSolveX(ctensorPtr0, x.ctensor, m.ctensor, ts.ctensor, a.ctensor, 
cupper, ctranspose, cunitriangular) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) Tril(diagonal int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTril(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Tril_(diagonal int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTril_(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TrilOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func TripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cswap := int32(0) - if swap { - cswap = int32(1) - } - lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Triu(diagonal int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriu(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Triu_(diagonal int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriu_(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TriuOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TrueDivide(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - 
defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TrueDivide_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) TrueDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TrueDivideScalar(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivideScalar(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TrueDivideScalar_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivideScalar_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) Trunc(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrunc(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Trunc_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrunc_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) TruncOut(out *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) TypeAs(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Unflatten(dim int64, sizes []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnflatten(ptr, ts.ctensor, dim, sizes, len(sizes)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UnfoldBackward(gradIn *Tensor, inputSizes 
[]int64, dim int64, size int64, step int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnfoldBackward(ptr, gradIn.ctensor, inputSizes, len(inputSizes), dim, size, step) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Uniform_(from float64, to float64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUniform_(ptr, ts.ctensor, from, to) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) UniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - creturnInverse := int32(0) - if returnInverse { - creturnInverse = int32(1) - } - creturnCounts := int32(0) - if returnCounts { - creturnCounts = int32(1) - } - var cdimVal int64 = 0 - var cdimNull int = 1 - if len(dim) > 0 { - cdimVal = dim[0] - cdimNull = 0 - } - lib.AtgUniqueConsecutive(ctensorPtr0, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) UniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - csorted := int32(0) - if sorted { - csorted = int32(1) - } - creturnInverse := int32(0) - if returnInverse { - creturnInverse = int32(1) - } - creturnCounts := int32(0) - if returnCounts { - creturnCounts = int32(1) - } - lib.AtgUniqueDim(ctensorPtr0, ts.ctensor, dim, csorted, creturnInverse, creturnCounts) - if err = TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) UniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) - - creturnInverse := int32(0) - if returnInverse { - creturnInverse = int32(1) - } - creturnCounts := int32(0) - if returnCounts { - creturnCounts = int32(1) - } - lib.AtgUniqueDimConsecutive(ctensorPtr0, ts.ctensor, dim, creturnInverse, creturnCounts) - if err = 
TorchErr(); err != nil { - return retVal0, retVal1, retVal2, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - - return retVal0, retVal1, retVal2, err -} - -func (ts *Tensor) Unsqueeze(dim int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnsqueeze(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Unsqueeze_(dim int64) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnsqueeze_(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBicubic2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleBicubic2dOut(out 
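
// In-place sketch: a trailing underscore mutates the receiver and returns
// only an error; the plain form allocates a fresh tensor.
func exampleInplace(x *Tensor) error {
	y, err := x.Unsqueeze(0, false) // new tensor; x untouched
	if err != nil {
		return err
	}
	defer y.MustDrop()
	return x.Unsqueeze_(0) // x itself gains the leading dim
}
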
*Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBilinear2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) 
UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleLinear1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, 
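
// Upsample sketch: the target size is explicit, and the optional
// scalesH/scalesW follow the nil-or-one-element convention, so nil lets
// libtorch derive the scales from the sizes; input is a 4-D NCHW tensor.
func exampleUpsample(img *Tensor) (*Tensor, error) {
	// e.g. [1, 3, 32, 32] -> [1, 3, 64, 64]
	return img.UpsampleBilinear2d([]int64{64, 64}, false, nil, nil, false)
}
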
cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleNearest1d(outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleNearest1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesVal float64 = 0.0 - var cscalesNull int = 1 - if len(scales) > 0 { - cscalesVal = scales[0] - cscalesNull = 0 - } - lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - 
cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int 
= 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, 
scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleTrilinear3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { - calignCorners = int32(1) - } - var cscalesDVal float64 = 0.0 - var cscalesDNull int = 1 - if len(scalesD) > 0 { - cscalesDVal = scalesD[0] - cscalesDNull = 0 - } - var cscalesHVal float64 = 0.0 - var cscalesHNull int = 1 - if len(scalesH) > 0 { - cscalesHVal = scalesH[0] - cscalesHNull = 0 - } - var cscalesWVal float64 = 0.0 - var cscalesWNull int = 1 - if len(scalesW) > 0 { - cscalesWVal = scalesW[0] - cscalesWNull = 0 - } - lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func ValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgValueSelectingReductionBackward(ptr, grad.ctensor, dim, indices.ctensor, sizes, len(sizes), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal 
= &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Values(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgValues(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Vander(x *Tensor, n []int64, increasing bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cnVal int64 = 0 - var cnNull int = 1 - if len(n) > 0 { - cnVal = n[0] - cnNull = 0 - } - cincreasing := int32(0) - if increasing { - cincreasing = int32(1) - } - lib.AtgVander(ptr, x.ctensor, cnVal, cnNull, cincreasing) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Var(unbiased bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { - cunbiased = int32(1) - } - lib.AtgVar(ptr, ts.ctensor, cunbiased) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) VarCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ccorrectionVal int64 = 0 - var ccorrectionNull int = 1 - if len(correction) > 0 { - ccorrectionVal = correction[0] - ccorrectionNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgVarCorrection(ptr, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) VarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ccorrectionVal int64 = 0 - var ccorrectionNull int = 1 - if len(correction) > 0 { - ccorrectionVal = correction[0] - ccorrectionNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgVarCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) VarDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { - cunbiased = int32(1) - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgVarDim(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) VarMean(unbiased bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cunbiased := int32(0) - if unbiased { - cunbiased = int32(1) - } - lib.AtgVarMean(ctensorPtr0, ts.ctensor, cunbiased) - if err = TorchErr(); 
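
// Variance sketch: VarCorrection exposes ATen's newer `correction`
// argument — nil falls back to the default (Bessel's correction, 1), while
// []int64{0} requests the biased estimator.
func exampleVar(x *Tensor) error {
	unbiased, err := x.VarCorrection([]int64{0}, []int64{1}, false, false)
	if err != nil {
		return err
	}
	defer unbiased.MustDrop()
	biased, err := x.VarCorrection([]int64{0}, []int64{0}, false, false)
	if err != nil {
		return err
	}
	defer biased.MustDrop()
	return nil
}
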
err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) VarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - var ccorrectionVal int64 = 0 - var ccorrectionNull int = 1 - if len(correction) > 0 { - ccorrectionVal = correction[0] - ccorrectionNull = 0 - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgVarMeanCorrection(ctensorPtr0, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) VarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) - - cunbiased := int32(0) - if unbiased { - cunbiased = int32(1) - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgVarMeanDim(ctensorPtr0, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal0, retVal1, err - } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - - return retVal0, retVal1, err -} - -func (ts *Tensor) VarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { - cunbiased = int32(1) - } - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Vdot(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgVdot(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) VdotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgVdotOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) View(size []int64, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgView(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ViewAs(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := 
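
// Tuple-return sketch: wrappers such as VarMean hand back both tensors at
// once; the consecutive C pointers above are an implementation detail.
func exampleVarMean(x *Tensor) error {
	v, m, err := x.VarMean(true, false)
	if err != nil {
		return err
	}
	defer v.MustDrop()
	defer m.MustDrop()
	return nil
}
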
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgViewAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ViewAsComplex(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgViewAsComplex(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ViewAsReal(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgViewAsReal(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ViewDtype(dtype gotch.DType, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgViewDtype(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func Vstack(tensors []Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.AtgVstack(ptr, ctensors, len(ctensors)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func VstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - lib.AtgVstackOut(ptr, out.ctensor, ctensors, len(ctensors)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func WhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgWhereScalar(ptr, condition.ctensor, selfScalar.cscalar, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) WhereScalarother(condition *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgWhereScalarother(ptr, condition.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func WhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgWhereScalarself(ptr, condition.ctensor, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) WhereSelf(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgWhereSelf(ptr, condition.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func 
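
// Stacking sketch: note that Vstack takes []Tensor by value rather than
// []*Tensor, so callers dereference when collecting inputs.
func exampleVstack(a, b *Tensor) (*Tensor, error) {
	return Vstack([]Tensor{*a, *b})
}
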
(ts *Tensor) Xlogy(other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogy(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Xlogy_(other *Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogy_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func (ts *Tensor) XlogyOutscalarOther(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogyOutscalarOther(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func XlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogyOutscalarSelf(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) XlogyOuttensor(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogyOuttensor(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) XlogyScalarOther(other *Scalar, del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogyScalarOther(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) XlogyScalarOther_(other *Scalar) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogyScalarOther_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func XlogyScalarSelf(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgXlogyScalarSelf(ptr, selfScalar.cscalar, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) Zero_() (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZero_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - ts.ctensor = *ptr - - return err -} - -func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZeros(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) ZerosLike(del bool) (retVal *Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZerosLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
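
// Construction sketch, assuming gotch.Float and gotch.CPU: factories take
// a dtype and a device, and in-place fillers follow the underscore
// convention.
func exampleZeros() (*Tensor, error) {
	x, err := Zeros([]int64{4, 4}, gotch.Float, gotch.CPU)
	if err != nil {
		return nil, err
	}
	if err := x.Uniform_(0.0, 1.0); err != nil { // fill in place with U(0,1)
		x.MustDrop()
		return nil, err
	}
	return x, nil
}
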
&Tensor{ctensor: *ptr} - - return retVal, err -} - -func ZerosOut(out *Tensor, size []int64) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZerosOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -// End of implementing Tensor ================================= +func(ts *Tensor) __And_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__And_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __AndTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__AndTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Iand_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __IandTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__IandTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Ilshift_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __IlshiftTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__IlshiftTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Ior_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __IorTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__IorTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Irshift_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __IrshiftTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__IrshiftTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Ixor_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __IxorTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__IxorTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Lshift_(other 
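
// Bitwise sketch: the double-underscore wrappers bind ATen's
// __and__/__iand__ family and rebind the receiver in place; they require a
// bool or integer dtype. IntScalar is assumed from this package's scalar
// helpers.
func exampleBitwise(mask *Tensor) error {
	return mask.__And_(IntScalar(1))
}
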
*Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __LshiftTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__LshiftTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Or_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Or_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __OrTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__OrTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Rshift_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __RshiftTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__RshiftTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __Xor_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) __XorTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__XorTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, 
err +} + +func(ts *Tensor) _AddBatchDim(batchDim int64, level int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddBatchDim(ptr, ts.ctensor, batchDim, level) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AddRelu(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddRelu(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AddRelu_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddRelu_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddReluOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AddReluScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddReluScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AddReluScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddReluScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _Aminmax(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_Aminmax(ctensorPtr0, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _AminmaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.Atg_AminmaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AmpUpdateScale_(ptr, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err = TorchErr(); err != nil { + return err + } + 
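
// Internal-op sketch: a leading underscore marks a binding to an ATen
// internal; most have public counterparts, but they remain directly
// callable.
func exampleAminmax(x *Tensor) error {
	minT, maxT, err := x._AminmaxDim(1, true, false)
	if err != nil {
		return err
	}
	defer minT.MustDrop()
	defer maxT.MustDrop()
	return nil
}
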
ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _AutocastToFullPrecision(cudaEnabled bool, cpuEnabled bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudaEnabled := int32(0) + if cudaEnabled { ccudaEnabled = int32(1) } +ccpuEnabled := int32(0) + if cpuEnabled { ccpuEnabled = int32(1) } + lib.Atg_AutocastToFullPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _AutocastToReducedPrecision(cudaEnabled bool, cpuEnabled bool, cudaDtype gotch.DType, cpuDtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudaEnabled := int32(0) + if cudaEnabled { ccudaEnabled = int32(1) } +ccpuEnabled := int32(0) + if cpuEnabled { ccpuEnabled = int32(1) } + lib.Atg_AutocastToReducedPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled, cudaDtype.CInt(), cpuDtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastByte(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastChar(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastDouble(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastFloat(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastHalf(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastInt(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) 
} + lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastLong(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CastShort(nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Cat(tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.Atg_Cat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.Atg_CatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Coalesce(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Coalesce(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Coalesced_(coalesced bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccoalesced := int32(0) + if coalesced { ccoalesced = int32(1) } + lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func _ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ComputeLinearCombination(ptr, input.ctensor, coefficients.ctensor) + if err = TorchErr(); err != nil { + return 
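
// Concatenation sketch: like Vstack, _Cat takes []Tensor by value plus an
// explicit dimension.
func exampleCat(a, b *Tensor) (*Tensor, error) {
	return _Cat([]Tensor{*a, *b}, 0)
}
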
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ComputeLinearCombinationOut(ptr, out.ctensor, input.ctensor, coefficients.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Conj(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Conj(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ConjPhysical(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ConjPhysical(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ConvDepthwise2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ConvDepthwise2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } + lib.Atg_ConvertIndicesFromCooToCsr(ptr, ts.ctensor, size, coutInt32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } + lib.Atg_ConvertIndicesFromCooToCsrOut(ptr, out.ctensor, ts.ctensor, size, coutInt32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ConvertIndicesFromCsrToCoo(crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +ctranspose := int32(0) + if transpose { ctranspose = int32(1) } + lib.Atg_ConvertIndicesFromCsrToCoo(ptr, 
crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ConvertIndicesFromCsrToCooOut(out *Tensor, crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +ctranspose := int32(0) + if transpose { ctranspose = int32(1) } + lib.Atg_ConvertIndicesFromCsrToCooOut(ptr, out.ctensor, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } +cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } +callowTf32 := int32(0) + if allowTf32 { callowTf32 = int32(1) } + lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } +cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } + lib.Atg_ConvolutionDeprecated(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ConvolutionMode(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CopyFrom(dst *Tensor, nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_CopyFrom(ptr, 
ts.ctensor, dst.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _CopyFromAndResize(dst *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CopyFromAndResize(ptr, ts.ctensor, dst.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } + lib.Atg_CtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } + lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } + lib.Atg_CudnnCtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, cdeterministic, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CudnnRnn(input *Tensor, weight []Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, 
numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0))) + + var cweight []lib.Ctensor + for _, t := range weight {cweight = append(cweight, t.ctensor)} +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } + lib.Atg_CudnnRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, len(batchSizes), dropoutState.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, retVal4, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal4 = &Tensor{ctensor: *ctensorPtr4} + + return retVal0, retVal1, retVal2, retVal3, retVal4, err +} + +func _CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cweightArr []lib.Ctensor + for _, t := range weightArr {cweightArr = append(cweightArr, t.ctensor)} +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } + lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CufftGetPlanCacheMaxSize(deviceIndex int64)(retVal int64, err error) { + + retVal = lib.Atg_CufftGetPlanCacheMaxSize(deviceIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func _CufftGetPlanCacheSize(deviceIndex int64)(retVal int64, err error) { + + retVal = lib.Atg_CufftGetPlanCacheSize(deviceIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _DebugHasInternalOverlap(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_DebugHasInternalOverlap(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _DetLuBasedHelper(del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := 
(*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_DetLuBasedHelper(ctensorPtr0, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) _DetLuBasedHelperBackwardHelper(detGrad *Tensor, det *Tensor, lu *Tensor, pivs *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DetLuBasedHelperBackwardHelper(ptr, detGrad.ctensor, det.ctensor, ts.ctensor, lu.ctensor, pivs.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _DimArange(like *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DimArange(ptr, like.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Dimi(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_Dimi(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _Dimv(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_Dimv(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func _DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Efficientzerotensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Efficientzerotensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +cincludeLastOffset := int32(0) + if includeLastOffset { cincludeLastOffset = int32(1) } + lib.Atg_EmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} 
+ retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func _EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } + lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, paddingIdx) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } + lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +cincludeLastOffset := int32(0) + if includeLastOffset { cincludeLastOffset = int32(1) } + lib.Atg_EmbeddingBagForwardOnly(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func _EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } + lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmptyAffineQuantized(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, len(size), scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EuclideanDist(ptr, x1.ctensor, x2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FakeQuantizeLearnablePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_FakeQuantizeLearnablePerChannelAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = 
&Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FakeQuantizeLearnablePerTensorAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_FakeQuantizeLearnablePerTensorAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparams(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _FftC2c(dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cforward := int32(0) + if forward { cforward = int32(1) } + lib.Atg_FftC2c(ptr, ts.ctensor, dim, len(dim), normalization, cforward) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cforward := int32(0) + if forward { cforward = int32(1) } + lib.Atg_FftC2cOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, cforward) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FftC2r(ptr, 
ts.ctensor, dim, len(dim), normalization, lastDimSize) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FftC2rOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, lastDimSize) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FftR2c(dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + conesided := int32(0) + if onesided { conesided = int32(1) } + lib.Atg_FftR2c(ptr, ts.ctensor, dim, len(dim), normalization, conesided) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + conesided := int32(0) + if onesided { conesided = int32(1) } + lib.Atg_FftR2cOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, conesided) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _FusedDropout(p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_FusedDropout(ctensorPtr0, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cperRowFakeQuant := int32(0) + if perRowFakeQuant { cperRowFakeQuant = int32(1) } +csymmetricQuant := int32(0) + if symmetricQuant { csymmetricQuant = int32(1) } + lib.Atg_FusedMovingAvgObsFqHelper(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _FwPrimal(level int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FwPrimal(ptr, ts.ctensor, level) + if err = TorchErr(); err != nil { + return retVal, err 
+ } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + lib.Atg_GridSampler2dCpuFallback(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + lib.Atg_GridSampler2dCpuFallbackBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_HasCompatibleShallowCopyType(ts.ctensor, from.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _HasSameStorageNumel(other *Tensor, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_HasSameStorageNumel(ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _HistogramddFromBinTensors(bins []Tensor, weight *Tensor, density bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cbins []lib.Ctensor + for _, t := range bins {cbins = append(cbins, t.ctensor)} +cdensity := int32(0) + if density { cdensity = int32(1) } + lib.Atg_HistogramddFromBinTensors(ptr, ts.ctensor, cbins, len(cbins), weight.ctensor, cdensity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _IndexCopy_(dim int64, index *Tensor, source *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_IndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _Indices(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Indices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + 
return retVal, err +} + +func(ts *Tensor) _IsZerotensor(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_IsZerotensor(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _LinalgInvOutHelper_(infosLu *Tensor, infosGetri *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LinalgInvOutHelper_(ptr, ts.ctensor, infosLu.ctensor, infosGetri.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _LinalgQrHelper(mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_LinalgQrHelper(ctensorPtr0, ts.ctensor, mode) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _LinalgSvd(a *Tensor, fullMatrices bool, computeUv bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + cfullMatrices := int32(0) + if fullMatrices { cfullMatrices = int32(1) } +ccomputeUv := int32(0) + if computeUv { ccomputeUv = int32(1) } + lib.Atg_LinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices, ccomputeUv) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func _LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, computeUv bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + cfullMatrices := int32(0) + if fullMatrices { cfullMatrices = int32(1) } +ccomputeUv := int32(0) + if computeUv { ccomputeUv = int32(1) } + lib.Atg_LinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices, ccomputeUv) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } + lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) 
{ + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } + lib.Atg_LogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Logcumsumexp(dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Logcumsumexp(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _LuWithInfo(pivot bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + cpivot := int32(0) + if pivot { cpivot = int32(1) } +ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.Atg_LuWithInfo(ctensorPtr0, ts.ctensor, cpivot, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func _MakeDual(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakeDual(ptr, primal.ctensor, tangent.ctensor, level) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + 
return retVal, err +} + +func(ts *Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _MaskedSoftmax(mask *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MaskedSoftmax(ptr, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _MkldnnReshape(shape []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, len(shape)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func _NativeMultiHeadSelfAttention(query *Tensor, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NativeMultiHeadSelfAttention(ptr, query.ctensor, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _NegView(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NegView(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _NewZerosWithSameFeatureMeta(other *Tensor, selfNumBatchDims int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NewZerosWithSameFeatureMeta(ptr, ts.ctensor, other.ctensor, selfNumBatchDims) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackAvailable()(retVal bool, err error) { + + retVal = lib.Atg_NnpackAvailable() + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + 
+func _NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Nnz(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_Nnz(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func _PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } + lib.Atg_PackPaddedSequence(ctensorPtr0, input.ctensor, lengths.ctensor, cbatchFirst) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } + lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, len(inputSize), batchSizes.ctensor, cbatchFirst) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } + lib.Atg_PadPackedSequence(ctensorPtr0, data.ctensor, batchSizes.ctensor, cbatchFirst, paddingValue.cscalar, totalLength) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _PinMemory(device gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_PinMemory(ptr, ts.ctensor, device.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_RemoveBatchDim(ptr, ts.ctensor, level, batchSize, 
outDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ReshapeAlias(size []int64, stride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ReshapeAlias(ptr, ts.ctensor, size, len(size), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_RowwisePrune(ctensorPtr0, weight.ctensor, mask.ctensor, compressedIndicesDtype.CInt()) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _SWhere(condition *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SWhere(ptr, condition.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SampleDirichlet(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SampleDirichlet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SaturateWeightToFp16(weight *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SaturateWeightToFp16(ptr, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, axis int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SegmentReduceBackward(ptr, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, axis) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ShapeAsTensor(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ShapeAsTensor(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SlowConv2dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + 
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_SlowConv2dBackward(ctensorPtr0, gradInput.ctensor, gradWeight.ctensor, gradBias.ctensor, gradOutput.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func _SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_SobolEngineDraw(ctensorPtr0, quasi.ctensor, n, sobolstate.ctensor, dimension, numGenerated, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _SobolEngineInitializeState_(dimension int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _SobolEngineScramble_(ltm *Tensor, dimension int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) _Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } + lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SoftmaxBackwardDataOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) + if err = TorchErr(); err != 
nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } + lib.Atg_SoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SolveHelper(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_SolveHelper(ctensorPtr0, ts.ctensor, a.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _SparseAddmm(sparse *Tensor, dense *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseAddmm(ptr, ts.ctensor, sparse.ctensor, dense.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseBroadcastTo(size []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseBroadcastTo(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, len(size), indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseLogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } + lib.Atg_SparseLogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseLogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseLogSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseMaskHelper(t *Tensor, maskIndices *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseMaskHelper(ptr, t.ctensor, maskIndices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } + lib.Atg_SparseSoftmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSparseMatmul(other *Tensor, del 
bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSparseMatmul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSum(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSumDim(dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSumDim(ptr, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSumDimDtype(ptr, ts.ctensor, dim, len(dim), dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SparseSumDtype(dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSumDtype(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Stack(tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.Atg_Stack(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _StackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.Atg_StackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _StandardGamma(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_StandardGamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _SymeigHelper(eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ceigenvectors := int32(0) + if eigenvectors { ceigenvectors = int32(1) } +cupper := int32(0) + if upper { cupper = int32(1) } + lib.Atg_SymeigHelper(ctensorPtr0, ts.ctensor, ceigenvectors, cupper) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _TestAmbiguousDefaults(dummy *Tensor, a int64, b int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestAmbiguousDefaults(ptr, dummy.ctensor, a, b) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestAmbiguousDefaultsB(ptr, dummy.ctensor, a, b) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestOptionalFilledIntlist(ptr, values.ctensor, addends, len(addends)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestOptionalIntlist(ptr, values.ctensor, addends, len(addends)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestSerializationSubcmul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _TestStringDefault(dummy *Tensor, a string, b string)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestStringDefault(ptr, dummy.ctensor, a, b) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _TestWarnInAutograd(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestWarnInAutograd(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.Atg_ToCopy(ptr, ts.ctensor, optionsKind.CInt(), 
optionsDevice.CInt(), cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _TorchCudaCuLinkerSymbolOp(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TorchCudaCuLinkerSymbolOp(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, len(expand1), expand2, len(expand2), expand3, len(expand3), sumdim, len(sumdim), unrollDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Unique(sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + csorted := int32(0) + if sorted { csorted = int32(1) } +creturnInverse := int32(0) + if returnInverse { creturnInverse = int32(1) } + lib.Atg_Unique(ctensorPtr0, ts.ctensor, csorted, creturnInverse) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + csorted := int32(0) + if sorted { csorted = int32(1) } +creturnInverse := int32(0) + if returnInverse { creturnInverse = int32(1) } +creturnCounts := int32(0) + if returnCounts { creturnCounts = int32(1) } + lib.Atg_Unique2(ctensorPtr0, ts.ctensor, csorted, creturnInverse, creturnCounts) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func _UnpackDual(dual *Tensor, level int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_UnpackDual(ctensorPtr0, dual.ctensor, level) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) _UnsafeView(size []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_UnsafeView(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleBicubic2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBicubic2dAa(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleBicubic2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBicubic2dAaBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleBicubic2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBicubic2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleBicubic2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBicubic2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleBilinear2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBilinear2dAa(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleBilinear2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBilinear2dAaBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleBilinear2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBilinear2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleBilinear2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleBilinear2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err 
!= nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleNearestExact1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.Atg_UpsampleNearestExact1d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleNearestExact1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.Atg_UpsampleNearestExact1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleNearestExact1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.Atg_UpsampleNearestExact1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleNearestExact1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.Atg_UpsampleNearestExact1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleNearestExact2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact2d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleNearestExact2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + 
cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleNearestExact2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleNearestExact2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleNearestExact3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact3d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleNearestExact3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 
0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UpsampleNearestExact3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _UpsampleNearestExact3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } +var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } +var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.Atg_UpsampleNearestExact3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64)(retVal bool, err error) { + + retVal = lib.Atg_UseCudnnCtcLoss(logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func _UseCudnnRnnFlattenWeight()(retVal bool, err error) { + + retVal = lib.Atg_UseCudnnRnnFlattenWeight() + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) _Values(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Values(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) _Version(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.Atg_Version(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func _WeightNorm(v *Tensor, g *Tensor, 
dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _WeightNormCudaInterface(v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_WeightNormCudaInterface(ctensorPtr0, v.ctensor, g.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _WeightNormCudaInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_WeightNormCudaInterfaceBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func _WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.Atg_WeightNormDifferentiableBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Abs(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbs(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Abs_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbs_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AbsOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Absolute(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsolute(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Absolute_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsolute_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + 
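Every wrapper in this generated file follows the same calling convention: the binding returns `(retVal *Tensor, err error)`, the in-place variant (trailing `_`) mutates the receiver instead of allocating, and the trailing `del` flag defers `ts.MustDrop()` so a temporary receiver is freed as soon as the call returns. A minimal usage sketch of that convention, assuming the usual `github.com/sugarme/gotch/ts` import path; `absPlus`, `x`, and `s` are illustrative names, not part of this patch:

```go
package example

import "github.com/sugarme/gotch/ts" // assumed import path for this generated package

// absPlus chains two of the generated bindings, letting `del` manage the temporary.
func absPlus(x *ts.Tensor, s *ts.Scalar) (*ts.Tensor, error) {
	y, err := x.Abs(false) // del=false: the caller keeps ownership of x
	if err != nil {
		return nil, err
	}
	// del=true: y is dropped (via the deferred MustDrop) as soon as
	// AddScalar returns, so the temporary |x| tensor does not leak.
	return y.AddScalar(s, true)
}
```

The companion wrappers in ts/must-tensor-generated.go follow the same shape but panic on error instead of returning it.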
+func(ts *Tensor) AbsoluteOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsoluteOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Acos(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Acos_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AcosOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Acosh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Acosh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AcoshOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } 
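+ // the C call stored the new tensor handle at ptr; dereference and wrap it so the Go side owns the result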
+ retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dBackward(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveMaxPool1d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgAdaptiveMaxPool1d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) AdaptiveMaxPool2d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgAdaptiveMaxPool2d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) AdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgAdaptiveMaxPool2dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = 
&Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) AdaptiveMaxPool3d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgAdaptiveMaxPool3d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) AdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgAdaptiveMaxPool3dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Add(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Add_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AddScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err 
+} + +func(ts *Tensor) AddScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Addbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addbmm_(batch1 *Tensor, batch2 *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addcdiv_(tensor1 *Tensor, tensor2 *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addcmul_(tensor1 *Tensor, tensor2 *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal 
*Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addmm_(mat1 *Tensor, mat2 *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addmv_(mat *Tensor, vec *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Addr_(vec1 *Tensor, vec2 *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Adjoint(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdjoint(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func AffineGridGenerator(theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + 
lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, len(size), calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func AffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, len(size), calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Alias(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAlias(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AlignAs(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) All(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAll(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AllAllOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAllAllOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AllDim(dim int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAllDim(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Allclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + cequalNan := int32(0) + if equalNan { cequalNan = int32(1) } + retVal = lib.AtgAllclose(ts.ctensor, other.ctensor, rtol, atol, cequalNan) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func AlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) 
AlphaDropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Amax(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAmax(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAmaxOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Amin(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAmin(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAminOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Aminmax(dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAminmax(ctensorPtr0, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) AminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAminmaxOut(ctensorPtr0, min.ctensor, max.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + 
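+ // the C side wrote both result handles consecutively starting at ctensorPtr0, which is why ctensorPtr1 was derived above by pointer arithmetic rather than a second malloc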
+ return retVal0, retVal1, err +} + +func(ts *Tensor) Angle(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAngle(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AngleOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Any(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAny(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AnyAllOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAnyAllOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AnyDim(dim int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAnyDim(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeOut(out *Tensor, end *Scalar)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeOut(ptr, out.ctensor, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeStart(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeStartOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeStartOut(ptr, out.ctensor, start.cscalar, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func ArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeStartStep(ptr, start.cscalar, end.cscalar, step.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arccos(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arccos_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ArccosOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arccosh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arccosh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ArccoshOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arcsin(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arcsin_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ArcsinOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arcsinh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arcsinh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + 
return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ArcsinhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arctan(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arctan2(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan2(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arctan2_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan2_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Arctan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arctan_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ArctanOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arctanh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Arctanh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ArctanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Argmax(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgArgmax(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgArgmaxOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Argmin(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgArgmin(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ArgminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgArgminOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Argsort(dim int64, descending bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cdescending := int32(0) + if descending { cdescending = int32(1) } + lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Argwhere(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArgwhere(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cstorageOffsetVal int64 = 0 + var cstorageOffsetNull int = 1 + if len(storageOffset) > 0 { + cstorageOffsetVal = storageOffset[0] + cstorageOffsetNull = 0 + } + lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset []int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cstorageOffsetVal int64 = 0 + var cstorageOffsetNull int = 1 + if len(storageOffset) > 0 { + cstorageOffsetVal = storageOffset[0] + cstorageOffsetNull = 0 + } + lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull) + if err = TorchErr(); err != nil { + return 
err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Asin(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Asin_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AsinOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Asinh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Asinh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AsinhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atan(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atan2(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atan2_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Atan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atan_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AtanOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: 
*ptr} + + return retVal, err +} + +func(ts *Tensor) Atanh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atanh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) AtanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atleast1d(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtleast1d(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atleast2d(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtleast2d(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Atleast3d(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtleast3d(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } + lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride 
[]int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, 
err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Baddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Baddbmm_(batch1 *Tensor, batch2 *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err 
!= nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgBartlettWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } + lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + cinputG := int32(0) + if inputG { cinputG = int32(1) } +cweightG := int32(0) + if weightG { cweightG = int32(1) } +cbiasG := int32(0) + if biasG { cbiasG = int32(1) } + lib.AtgBatchNormBackwardReduce(ctensorPtr0, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG) + if err = TorchErr(); err != nil { + return retVal0, 
retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func BatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgBatchNormGatherStats(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func BatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgBatchNormGatherStatsWithCounts(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func BatchNormStats(input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgBatchNormStats(ctensorPtr0, input.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func BatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgBatchNormUpdateStats(ctensorPtr0, input.ctensor, 
runningMean.ctensor, runningVar.ctensor, momentum) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Bernoulli(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Bernoulli_(p *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BernoulliFloat_(p float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulliFloat_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BernoulliOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulliOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BernoulliP(p float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulliP(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Bilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts 
*Tensor) BinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyWithLogitsBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Binomial(count *Tensor, prob *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinomial(ptr, count.ctensor, prob.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseAnd(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseAnd_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseAndScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseAndTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseAndTensor_(other *Tensor)(err 
error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseAndTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseLeftShift(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShift(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseLeftShift_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShift_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func BitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseLeftShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShiftTensorScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseLeftShiftTensorScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShiftTensorScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseLeftShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseNot(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNot(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseNot_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNot_(ptr, ts.ctensor) + if err = 
TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseNotOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseOr(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseOr_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseOrScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseOrTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseOrTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseOrTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseRightShift(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShift(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseRightShift_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShift_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func BitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseRightShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShiftTensorScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseRightShiftTensorScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShiftTensorScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseRightShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseXor(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseXor_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseXorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseXorTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BitwiseXorTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) BitwiseXorTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgBlackmanWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlockDiag(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgBlockDiag(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Bmm(mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BroadcastTo(size []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBroadcastTo(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Bucketize(boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +cright := int32(0) + if right { cright = int32(1) } + lib.AtgBucketize(ptr, ts.ctensor, boundaries.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +cright := int32(0) + if right { cright = int32(1) } + lib.AtgBucketizeScalar(ptr, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) BucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +cright := int32(0) + if right { cright = int32(1) } + 
lib.AtgBucketizeTensorOut(ptr, out.ctensor, ts.ctensor, boundaries.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CanCast(from gotch.DType, to gotch.DType)(retVal bool, err error) { + + retVal = lib.AtgCanCast(from.CInt(), to.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func CartesianProd(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgCartesianProd(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Cat(tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgCat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cauchy_(median float64, sigma float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCauchy_(ptr, ts.ctensor, median, sigma) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ccomputeModeVal int64 = 0 + var ccomputeModeNull int = 1 + if len(computeMode) > 0 { + ccomputeModeVal = computeMode[0] + ccomputeModeNull = 0 + } + lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, ccomputeModeVal, ccomputeModeNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ceil(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeil(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ceil_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeil_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) CeilOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Celu(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} 
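+ // NOTE: an editorial comment on the convention visible in these wrappers,
+ // not generator output: the caller takes ownership of retVal and should
+ // release it (e.g. with MustDrop) when finished; a true `del` argument only
+ // schedules the receiver tensor for release via the deferred ts.MustDrop()
+ // at the top of the function.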
+ + return retVal, err +} + +func(ts *Tensor) Celu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func ChainMatmul(matrices []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cmatrices []lib.Ctensor + for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)} + lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ChainMatmulOut(out *Tensor, matrices []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cmatrices []lib.Ctensor + for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)} + lib.AtgChainMatmulOut(ptr, out.ctensor, cmatrices, len(cmatrices)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ChannelShuffle(groups int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgChannelShuffle(ptr, ts.ctensor, groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cholesky(upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgCholesky(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CholeskyInverse(upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CholeskySolveOut(out *Tensor, input2 *Tensor, 
upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgChooseQparamsOptimized(ctensorPtr0, input.ctensor, numel, nBins, ratio, bitWidth) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Clamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Clamp_(min *Scalar, max *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClampMax(max *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMax(ptr, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMax_(max *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMaxTensor(max *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMaxTensor(ptr, ts.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMaxTensor_(max *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMaxTensor_(ptr, ts.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClampMaxTensorOut(out *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMaxTensorOut(ptr, out.ctensor, ts.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMin(min *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMin(ptr, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMin_(min *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClampMinOut(out *Tensor, min *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMinTensor(min *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMinTensor(ptr, ts.ctensor, min.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampMinTensor_(min *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMinTensor_(ptr, ts.ctensor, min.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClampMinTensorOut(out *Tensor, min *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMinTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampTensor(ptr, ts.ctensor, min.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClampTensor_(min *Tensor, max *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Clip(min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClip(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Clip_(min *Scalar, max *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClip_(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClipOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClipTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClipTensor(ptr, ts.ctensor, min.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ClipTensor_(min *Tensor, max *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClipTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClipTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Coalesce(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCoalesce(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2im(ptr, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Col2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imBackward(ptr, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Col2imBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imBackwardGradInput(ptr, gradInput.ctensor, 
gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Col2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ColIndices(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgColIndices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ColumnStack(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgColumnStack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ColumnStackOut(out *Tensor, tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgColumnStackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Combinations(r int64, withReplacement bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cwithReplacement := int32(0) + if withReplacement { cwithReplacement = int32(1) } + lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Complex(real *Tensor, imag *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgComplex(ptr, real.ctensor, imag.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgComplexOut(ptr, out.ctensor, real.ctensor, imag.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Concat(tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgConcat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConcatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) 
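+ // Editor's note, hedged: each []Tensor parameter is flattened below into a
+ // []lib.Ctensor of raw C handles passed together with an explicit length,
+ // because the C side receives a plain pointer-plus-count rather than a Go
+ // slice; the same marshaling recurs for every tensor-list wrapper here.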
+ + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgConcatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Conj(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConj(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ConjPhysical(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConjPhysical(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ConjPhysical_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConjPhysical_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConjPhysicalOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ConstantPadNd(pad []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConstantPadNd(ptr, ts.ctensor, pad, len(pad)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Contiguous(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgContiguous(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv1dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: 
*ptr} + + return retVal, err +} + +func Conv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv2dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv3dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvDepthwise3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgConvTbcBackward(ctensorPtr0, ts.ctensor, input.ctensor, weight.ctensor, bias.ctensor, pad) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func ConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, 
stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } + lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } + lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CopySparseToSparse_(src *Tensor, nonBlocking bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } + lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Copysign(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCopysign(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Copysign_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCopysign_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + 
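// Editor's note, hedged: TorchErr appears to read back the error message the
+ // libtch shim recorded if the underlying ATen call threw, so a non-nil err
+ // here means ts was left untouched; on success the in-place ("_") methods
+ // instead swap ts.ctensor to the handle written through ptr.
+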
return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) CopysignOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCopysignOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CopysignScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCopysignScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CopysignScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCopysignScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) CopysignScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCopysignScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Corrcoef(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCorrcoef(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cos(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cos_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) CosOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cosh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cosh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) CoshOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, 
reduction int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CountNonzero(dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgCountNonzero(ptr, ts.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CountNonzeroDimIntlist(dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCountNonzeroDimIntlist(ptr, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cov(correction int64, fweights *Tensor, aweights *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCov(ptr, ts.ctensor, correction, fweights.ctensor, aweights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cross(other *Tensor, dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgCross(ptr, ts.ctensor, other.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCrossEntropyLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, labelSmoothing) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CrowIndices(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCrowIndices(ptr, 
ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } + lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, reduction, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } + lib.AtgCtcLossTensor(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgCudnnBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func CudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + 
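// Editor's note, hedged: multi-output wrappers rely on the shim writing its
+ // result handles contiguously from ctensorPtr0 onward, so the extra return
+ // slots below are derived by pointer arithmetic in steps of one pointer
+ // size (unsafe.Sizeof of a *lib.Ctensor) instead of separate allocations.
+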
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgCudnnBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) CudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +callowTf32 := int32(0) + if allowTf32 { callowTf32 = int32(1) } + lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +callowTf32 := int32(0) + if allowTf32 { callowTf32 = int32(1) } + lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CudnnGridSampler(grid *Tensor, 
del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgCudnnGridSamplerBackward(ctensorPtr0, ts.ctensor, grid.ctensor, gradOutput.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) CudnnIsAcceptable(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgCudnnIsAcceptable(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) Cummax(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgCummax(ctensorPtr0, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) CummaxOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgCummaxOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func CummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCummaxminBackward(ptr, grad.ctensor, input.ctensor, indices.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cummin(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgCummin(ctensorPtr0, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) CumminOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := 
(*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgCumminOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cumprod_(dim int64, dtype gotch.DType)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprod_(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func CumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprodBackward(ptr, grad.ctensor, input.ctensor, dim, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Cumsum_(dim int64, dtype gotch.DType)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsum_(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) CumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CumulativeTrapezoid(y *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumulativeTrapezoid(ptr, y.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumulativeTrapezoidX(ptr, y.ctensor, x.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Data(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgData(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Deg2rad(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDeg2rad(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Deg2rad_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDeg2rad_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Deg2radOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDeg2radOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DenseDim(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgDenseDim(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) Dequantize(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDequantize(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Det(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Detach(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDetach(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Detach_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDetach_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Diag(diagonal int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiag(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func DiagBackward(grad *Tensor, inputSizes []int64, diagonal int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DiagOut(out *Tensor, 
diagonal int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Diagflat(offset int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagflat(ptr, ts.ctensor, offset) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func DiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagonalBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DiagonalScatter(src *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagonalScatter(ptr, ts.ctensor, src.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Diff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiff(ptr, ts.ctensor, n, dim, prepend.ctensor, append.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiffOut(ptr, out.ctensor, ts.ctensor, n, dim, prepend.ctensor, append.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Digamma(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Digamma_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigamma_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DigammaOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + 
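+ // Editor's note: an illustrative sketch only, not generated code, of how
+ // these wrappers are typically called; OfSlice is assumed to exist as in
+ // gotch's public ts API and is not part of this diff:
+ //
+ //	x, err := OfSlice([]float64{1.5, 2.5, 3.5})
+ //	if err != nil { panic(err) }
+ //	y, err := x.Digamma(true) // del=true: x is dropped after the call
+ //	if err != nil { panic(err) }
+ //	defer y.MustDrop()
+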
+func(ts *Tensor) Dist(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDist(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Div(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Div_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DivScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivScalarMode_(other *Scalar, roundingMode string)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DivTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivTensorMode_(other *Tensor, roundingMode string)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivTensorMode_(ptr, ts.ctensor, other.ctensor, 
roundingMode) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Divide(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Divide_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivideScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivideScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DivideScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivideScalarMode_(other *Scalar, roundingMode string)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) DivideTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DivideTensorMode_(other *Tensor, roundingMode string)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Dot(tensor *Tensor, del bool)(retVal *Tensor, 
err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDot(ptr, ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) DotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Dropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Dropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func Dstack(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgDstack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func DstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgDstackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Eig(eigenvectors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ceigenvectors := int32(0) + if eigenvectors { ceigenvectors = int32(1) } + lib.AtgEig(ctensorPtr0, ts.ctensor, ceigenvectors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) EigE(e *Tensor, v *Tensor, eigenvectors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ceigenvectors := int32(0) + if eigenvectors { ceigenvectors = int32(1) } + lib.AtgEigE(ctensorPtr0, e.ctensor, v.ctensor, ts.ctensor, ceigenvectors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func Einsum(equation string, tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var 
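// Illustrative sketch, not generated code: functional ops such as Dropout
// take the input tensor plus plain Go arguments (the train flag is lowered
// to an int32 as above), while Dstack takes a []Tensor value slice.
// Assumes a and b are valid tensors of the same shape.
func exampleDropoutDstack(a, b *Tensor) (*Tensor, error) {
	dropped, err := Dropout(a, 0.5, true) // p=0.5, training mode
	if err != nil {
		return nil, err
	}
	defer dropped.MustDrop()
	return Dstack([]Tensor{*dropped, *b})
}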
ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgEinsum(ptr, equation, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Elu(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgElu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Elu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgElu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func EluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cisResult := int32(0) + if isResult { cisResult = int32(1) } + lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cisResult := int32(0) + if isResult { cisResult = int32(1) } + lib.AtgEluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) EluOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Embedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } + lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } + lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool)(retVal0 *Tensor, retVal1 
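// Illustrative sketch, not generated code: Einsum takes the equation string
// and a value slice of operands, marshalled into []lib.Ctensor as above.
func exampleEinsum(a, b *Tensor) (*Tensor, error) {
	// Matrix multiplication written in einsum notation.
	return Einsum("ij,jk->ik", []Tensor{*a, *b})
}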
*Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +cincludeLastOffset := int32(0) + if includeLastOffset { cincludeLastOffset = int32(1) } + lib.AtgEmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func EmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +cincludeLastOffset := int32(0) + if includeLastOffset { cincludeLastOffset = int32(1) } +var cpaddingIdxVal int64 = 0 + var cpaddingIdxNull int = 1 + if len(paddingIdx) > 0 { + cpaddingIdxVal = paddingIdx[0] + cpaddingIdxNull = 0 + } + lib.AtgEmbeddingBagPaddingIdx(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, cpaddingIdxVal, cpaddingIdxNull) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func EmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } + lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) EmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) + if err = TorchErr(); err != 
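// Illustrative sketch, not generated code: optional scalars such as
// paddingIdx are passed as []int64 — a nil or empty slice means "null", a
// one-element slice supplies the value (see the cpaddingIdxVal/cpaddingIdxNull
// lowering above). mode=0 is assumed to mean "sum" as in
// torch.nn.functional.embedding_bag; all tensor arguments are assumed valid.
func exampleEmbeddingBag(weight, indices, offsets, perSample *Tensor) error {
	out, o2b, bagSize, maxIdx, err := EmbeddingBagPaddingIdx(weight, indices,
		offsets, false, 0, false, perSample, false, nil) // nil -> padding_idx=None
	if err != nil {
		return err
	}
	for _, t := range []*Tensor{out, o2b, bagSize, maxIdx} {
		t.MustDrop()
	}
	return nil
}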
nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func EmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } + lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmpty(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) EmptyLike(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyOut(out *Tensor, size []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyQuantized(ptr, size, len(size), qtensor.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyStrided(ptr, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Eq(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Eq_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) EqScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) EqTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqTensor(ptr, ts.ctensor, 
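// Illustrative sketch, not generated code: factory functions take an explicit
// dtype/device pair; gotch.Float and gotch.CPU are the package constants used
// throughout this repo. Empty returns uninitialized storage, so the contents
// are arbitrary until written.
func exampleEmpty() (*Tensor, error) {
	return Empty([]int64{2, 3}, gotch.Float, gotch.CPU)
}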
other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) EqTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) EqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Equal(other *Tensor, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgEqual(ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) Erf(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Erf_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErf_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ErfOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Erfc(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Erfc_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfc_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ErfcOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Erfinv(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinv(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Erfinv_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinv_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ErfinvOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
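// Illustrative sketch, not generated code: EqTensor returns an elementwise
// boolean tensor, while Equal returns a single Go bool (note it allocates no
// output tensor and reads the result straight off the C call above).
func exampleEq(x, y *Tensor) (bool, error) {
	mask, err := x.EqTensor(y, false)
	if err != nil {
		return false, err
	}
	defer mask.MustDrop()
	return x.Equal(y, false)
}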
&Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Exp(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExp(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Exp2(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExp2(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Exp2_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExp2_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Exp2Out(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExp2Out(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Exp_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExp_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) ExpOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExpOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Expand(size []int64, implicit bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cimplicit := int32(0)
+ if implicit { cimplicit = int32(1) }
+ lib.AtgExpand(ptr, ts.ctensor, size, len(size), cimplicit)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ExpandAs(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Expm1(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExpm1(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Expm1_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExpm1_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Expm1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Exponential_(lambd float64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgExponential_(ptr, ts.ctensor,
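// Illustrative sketch, not generated code: Expand broadcasts a size-1
// dimension without copying; implicit is normally false. Assumes x has
// shape (3, 1).
func exampleExpand(x *Tensor) (*Tensor, error) {
	return x.Expand([]int64{3, 4}, false, false)
}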
lambd) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeM(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeMOut(out *Tensor, n int64, m int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeMOut(ptr, out.ctensor, n, m) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeOut(out *Tensor, n int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeOut(ptr, out.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgFakeQuantizePerChannelAffineCachemask(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func FakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerChannelAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 
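// Illustrative sketch, not generated code: Eye builds an n-by-n identity
// matrix; EyeM takes separate row and column counts.
func exampleEye() (*Tensor, error) {
	return Eye(3, gotch.Float, gotch.CPU)
}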
*Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgFakeQuantizePerTensorAffineCachemask(ctensorPtr0, ts.ctensor, scale, zeroPoint, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func FakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffineTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
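// Illustrative sketch, not generated code: fake quantization simulates
// integer rounding while staying in float; the scale/zeroPoint values and
// the 0..255 range below are assumed values for a uint8-style scheme.
func exampleFakeQuant(x *Tensor) (*Tensor, error) {
	return x.FakeQuantizePerTensorAffine(0.1, 0, 0, 255, false)
}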
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackQuantizedMatrixKn(ptr, input.ctensor, k, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FeatureAlphaDropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func FeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FeatureDropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FftFft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftFft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftFft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := 
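// Illustrative sketch, not generated code: FeatureDropout zeroes whole
// feature maps rather than single elements; like Dropout it takes
// (input, p, train). Assumes x is an NCHW tensor.
func exampleFeatureDropout(x *Tensor) (*Tensor, error) {
	return FeatureDropout(x, 0.2, true)
}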
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftFftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FftFftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFftfreqOut(ptr, out.ctensor, n, d) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftFftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftFftshift(dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFftshift(ptr, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftHfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftHfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftHfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftHfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftHfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftHfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() 
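// Illustrative sketch, not generated code: the FFT wrappers reuse the
// empty-slice-as-null convention for the optional length n (see the
// cnVal/cnNull lowering above); norm is assumed to accept "forward",
// "backward" or "ortho" as in the PyTorch docs.
func exampleFft(x *Tensor) (*Tensor, error) {
	// nil n -> full signal length; dim -1 -> last dimension.
	return x.FftFft(nil, -1, "backward", false)
}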
} + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftHfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftHfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftHfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftHfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftHfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, 
len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIfftshift(dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIfftshift(ptr, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIhfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIhfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIhfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIhfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIhfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIhfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIhfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIhfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIhfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIhfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIhfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIrfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIrfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIrfft2(s []int64, 
dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIrfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIrfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIrfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIrfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIrfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftRfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftRfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftRfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull 
int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftRfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FftRfftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfftfreqOut(ptr, out.ctensor, n, d) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftRfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fill_(value *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFill_(ptr, ts.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cwrap := int32(0) + if wrap { cwrap = int32(1) } + lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FillTensor_(value *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFillTensor_(ptr, ts.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Fix(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFix(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fix_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFix_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FixOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFixOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Flatten(startDim int64, endDim int64, del bool)(retVal *Tensor, err error) { + if del { defer 
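// Illustrative sketch, not generated code: trailing-underscore methods are
// in-place — they swap the result into ts.ctensor instead of returning a new
// tensor, so only an error comes back. Assumes s is a valid *Scalar.
func exampleFill(x *Tensor, s *Scalar) error {
	return x.Fill_(s)
}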
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FlattenDenseTensors(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgFlattenDenseTensors(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Flip(dims []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlip(ptr, ts.ctensor, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fliplr(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFliplr(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Flipud(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlipud(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloatPower(exponent *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPower(ptr, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloatPower_(exponent *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPower_(ptr, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func FloatPowerScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPowerScalar(ptr, selfScalar.cscalar, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPowerScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloatPowerTensor_(exponent *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPowerTensor_(ptr, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FloatPowerTensorScalar(exponent *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPowerTensorScalar(ptr, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + 
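// Illustrative sketch, not generated code: Flatten collapses dimensions
// startDim through endDim into one; 0 and -1 flatten the whole tensor.
func exampleFlatten(x *Tensor) (*Tensor, error) {
	return x.Flatten(0, -1, false)
}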
+ return retVal, err +} + +func(ts *Tensor) FloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPowerTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloatPowerTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Floor(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloor(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Floor_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloor_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FloorDivide(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloorDivide_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloorDivideScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivideScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FloorDivideScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivideScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FloorOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fmax(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmax(ptr, ts.ctensor, other.ctensor) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FmaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fmin(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmin(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FminOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFminOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fmod(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Fmod_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FmodScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FmodTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FmodTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) FmodTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Frac(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrac(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Frac_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrac_(ptr, ts.ctensor) + if 
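// Illustrative sketch, not generated code: Fmax/Fmin follow C fmax semantics
// (when one operand is NaN, the other is returned), and Fmod has both *Scalar
// and *Tensor divisor variants. Assumes x and y have matching shapes.
func exampleFmaxFmod(x, y *Tensor) (*Tensor, error) {
	m, err := x.Fmax(y, false)
	if err != nil {
		return nil, err
	}
	defer m.MustDrop()
	return x.FmodTensor(y, false)
}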
+func(ts *Tensor) Frac_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFrac_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) FracOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFracOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) FractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgFractionalMaxPool2d(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) FractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) FractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFractionalMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) FractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgFractionalMaxPool2dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
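The two-result wrappers here (the fractional pooling family, and `Frexp` below) allocate a single slot but derive `ctensorPtr1` by stepping one `Ctensor` stride past `ctensorPtr0`; the C shim writes both result handles into those consecutive slots. A hedged caller-side sketch, again as if inside this package (tensor names and the kernel/output sizes are assumptions for illustration):

    // Sketch only: the pooled output and the index tensor come back together.
    func examplePool2d(xs, randomSamples *Tensor) error {
    	out, indices, err := xs.FractionalMaxPool2d([]int64{2, 2}, []int64{4, 4}, randomSamples, false)
    	if err != nil {
    		return err
    	}
    	// Both returned tensors are caller-owned and must be dropped.
    	defer out.MustDrop()
    	defer indices.MustDrop()
    	return nil
    }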
+func(ts *Tensor) FractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgFractionalMaxPool3d(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) FractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) FractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFractionalMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) FractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgFractionalMaxPool3dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) Frexp(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgFrexp(ctensorPtr0, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) FrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgFrexpTensorOut(ctensorPtr0, mantissa.ctensor, exponent.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) FrobeniusNorm(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgFrobeniusNorm(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) FrobeniusNormDim(dim []int64, keepdim bool, 
del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgFrobeniusNormDim(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cshared := int32(0) + if shared { cshared = int32(1) } +var csizeVal int64 = 0 + var csizeNull int = 1 + if len(size) > 0 { + csizeVal = size[0] + csizeNull = 0 + } + lib.AtgFromFile(ptr, filename, cshared, csizeVal, csizeNull, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Full(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFull(ptr, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FullLike(fillValue *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFullOut(ptr, out.ctensor, size, len(size), fillValue.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) FusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperRowFakeQuant := int32(0) + if perRowFakeQuant { cperRowFakeQuant = int32(1) } +csymmetricQuant := int32(0) + if symmetricQuant { csymmetricQuant = int32(1) } + lib.AtgFusedMovingAvgObsFakeQuant(ptr, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor, 
err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { csparseGrad = int32(1) } + lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { csparseGrad = int32(1) } + lib.AtgGatherBackward(ptr, grad.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { csparseGrad = int32(1) } + lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Gcd(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGcd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Gcd_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGcd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GcdOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGcdOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ge(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ge_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GeTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func(ts *Tensor) GeTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Gelu(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GeluBackward(grad *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeluBackward(ptr, grad.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GeluBackwardGradInput(gradInput *Tensor, grad *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeluBackwardGradInput(ptr, gradInput.ctensor, grad.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GeluOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Geometric_(p float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeometric_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Geqrf(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgGeqrf(ctensorPtr0, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) GeqrfA(a *Tensor, tau *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgGeqrfA(ctensorPtr0, a.ctensor, tau.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Ger(vec2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgGer(ptr, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Glu(dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGlu(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GluOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Grad(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGrad(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Greater(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreater(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Greater_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreater_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GreaterEqual(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqual(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GreaterEqual_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqual_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GreaterEqualScalarOut(out 
*Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GreaterEqualTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqualTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GreaterEqualTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqualTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GreaterEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GreaterScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GreaterTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GreaterTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GreaterTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + 
lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler3dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } + lib.AtgGridSampler3dBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func GroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } + lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Gru(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cparams []lib.Ctensor + for _, t := range params {cparams = append(cparams, t.ctensor)} +chasBiases := int32(0) + if hasBiases { chasBiases = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } + lib.AtgGru(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func GruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases 
bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cparams []lib.Ctensor + for _, t := range params {cparams = append(cparams, t.ctensor)} +chasBiases := int32(0) + if hasBiases { chasBiases = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } + lib.AtgGruData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Gt(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Gt_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GtTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) GtTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) GtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgHammingWindowPeriodic(ptr, windowLength, 
cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgHammingWindowPeriodicAlpha(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgHammingWindowPeriodicAlphaBeta(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgHannWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardshrink(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrink(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOut.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardshrinkOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrinkOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + 
} + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardsigmoid(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardsigmoid_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoid_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) HardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardsigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardswish(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswish(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardswish_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswish_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) HardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswishBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardswishOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswishOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardtanh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hardtanh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + 
return err +} + +func(ts *Tensor) HardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HardtanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Heaviside(values *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHeaviside(ptr, ts.ctensor, values.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Heaviside_(values *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHeaviside_(ptr, ts.ctensor, values.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) HeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHeavisideOut(ptr, out.ctensor, ts.ctensor, values.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Histc(bins int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHistc(ptr, ts.ctensor, bins) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Hspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); 
err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Hstack(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgHstack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgHstackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HuberLoss(target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHuberLoss(ptr, ts.ctensor, target.ctensor, reduction, delta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHuberLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHuberLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) HuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHuberLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, delta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hypot(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHypot(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Hypot_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHypot_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = 
*ptr
+
+ return err
+}
+
+func(ts *Tensor) HypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgHypotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) I0(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgI0(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) I0_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgI0_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) I0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgI0Out(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Igamma(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIgamma(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Igamma_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIgamma_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) IgammaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIgammaOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Igammac(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIgammac(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Igammac_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIgammac_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) IgammacOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIgammacOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
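The underscore methods in this run (`Hypot_`, `I0_`, `Igamma_`, `Igammac_`) are the in-place variants: instead of returning a new `*Tensor`, the wrapper swaps the freshly returned C handle into the receiver's `ctensor` field, so the Go value keeps its identity while its underlying storage is replaced. A small sketch under that reading (the input tensor `xs` is an assumed caller-supplied value):

    func exampleInPlace(xs *Tensor) error {
    	// No del flag and no returned tensor: after the call,
    	// xs itself holds i0(xs) via the swapped ctensor handle.
    	if err := xs.I0_(); err != nil {
    		return err
    	}
    	return nil
    }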
+func(ts *Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIm2col(ptr, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Im2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIm2colBackward(ptr, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Im2colBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIm2colBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Im2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Imag(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgImag(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) IndexAdd_(dim int64, index *Tensor, source *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) IndexAddOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIndexAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) IndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) IndexCopy_(dim int64, index *Tensor, source 
*Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) IndexFill(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) IndexFill_(dim int64, index *Tensor, value *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) IndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFillIntTensor(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) IndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFillIntTensor_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func IndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelectBackward(ptr, grad.ctensor, selfSizes, len(selfSizes), dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Indices(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) InfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgInfinitelyDifferentiableGeluBackward(ptr, grad.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Inner(other 
*Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgInner(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) InnerOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgInnerOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func InstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cuseInputStats := int32(0)
+ if useInputStats { cuseInputStats = int32(1) }
+ccudnnEnabled := int32(0)
+ if cudnnEnabled { ccudnnEnabled = int32(1) }
+ lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) IntRepr(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgIntRepr(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Inverse(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgInverse(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) InverseOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) IsCoalesced(del bool)(retVal bool, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgIsCoalesced(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) IsComplex(del bool)(retVal bool, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgIsComplex(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) IsConj(del bool)(retVal bool, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgIsConj(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) IsDistributed(del bool)(retVal bool, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgIsDistributed(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) IsFloatingPoint(del bool)(retVal bool, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgIsFloatingPoint(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
defer ts.MustDrop() } + + retVal = lib.AtgIsInference(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsLeaf(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsLeaf(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsNeg(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsNeg(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsNonzero(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsNonzero(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsPinned(device gotch.Device, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsPinned(ts.ctensor, device.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsSameSize(other *Tensor, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsSameSize(ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsSetTo(tensor *Tensor, del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsSetTo(ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) IsSigned(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgIsSigned(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func IsVulkanAvailable()(retVal bool, err error) { + + retVal = lib.AtgIsVulkanAvailable() + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) Isclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cequalNan := int32(0) + if equalNan { cequalNan = int32(1) } + lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Isfinite(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsfinite(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Isin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cassumeUnique := int32(0) + if assumeUnique { cassumeUnique = int32(1) } +cinvert := int32(0) + if invert { cinvert = int32(1) } + lib.AtgIsin(ptr, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func IsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cassumeUnique := int32(0) + if assumeUnique { cassumeUnique = int32(1) } +cinvert := int32(0) + 
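// NOTE: Go bools are lowered to int32 flags (0/1) here; the libtch C shims take plain ints rather than C bools. + 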
if invert { cinvert = int32(1) } + lib.AtgIsinScalarTensor(ptr, element.cscalar, testElements.ctensor, cassumeUnique, cinvert) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func IsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cassumeUnique := int32(0) + if assumeUnique { cassumeUnique = int32(1) } +cinvert := int32(0) + if invert { cinvert = int32(1) } + lib.AtgIsinScalarTensorOut(ptr, out.ctensor, element.cscalar, testElements.ctensor, cassumeUnique, cinvert) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func IsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cassumeUnique := int32(0) + if assumeUnique { cassumeUnique = int32(1) } +cinvert := int32(0) + if invert { cinvert = int32(1) } + lib.AtgIsinTensorScalar(ptr, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func IsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cassumeUnique := int32(0) + if assumeUnique { cassumeUnique = int32(1) } +cinvert := int32(0) + if invert { cinvert = int32(1) } + lib.AtgIsinTensorScalarOut(ptr, out.ctensor, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func IsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cassumeUnique := int32(0) + if assumeUnique { cassumeUnique = int32(1) } +cinvert := int32(0) + if invert { cinvert = int32(1) } + lib.AtgIsinTensorTensorOut(ptr, out.ctensor, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Isinf(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsinf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Isnan(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsnan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Isneginf(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsneginf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) IsneginfOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
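// NOTE: the Atg* call fills this slot with the handle of the newly created result tensor; TorchErr() then surfaces any error the C wrapper recorded during the call. + 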
lib.AtgIsneginfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Isposinf(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsposinf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) IsposinfOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsposinfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Isreal(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsreal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Istft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var chopLengthVal int64 = 0 + var chopLengthNull int = 1 + if len(hopLength) > 0 { + chopLengthVal = hopLength[0] + chopLengthNull = 0 + } +var cwinLengthVal int64 = 0 + var cwinLengthNull int = 1 + if len(winLength) > 0 { + cwinLengthVal = winLength[0] + cwinLengthNull = 0 + } +ccenter := int32(0) + if center { ccenter = int32(1) } +cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +conesided := int32(0) + if onesided { conesided = int32(1) } +var clengthVal int64 = 0 + var clengthNull int = 1 + if len(length) > 0 { + clengthVal = length[0] + clengthNull = 0 + } +creturnComplex := int32(0) + if returnComplex { creturnComplex = int32(1) } + lib.AtgIstft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func KaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKaiserWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func KaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } + lib.AtgKaiserWindowBeta(ptr, windowLength, cperiodic, beta, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func KaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) 
} + lib.AtgKaiserWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) KlDiv(target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + clogTarget := int32(0) + if logTarget { clogTarget = int32(1) } + lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction, clogTarget) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) KlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + clogTarget := int32(0) + if logTarget { clogTarget = int32(1) } + lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, clogTarget) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Kron(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKron(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) KronOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKronOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Kthvalue(k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgKthvalue(ctensorPtr0, ts.ctensor, k, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) KthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgKthvalueValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) L1Loss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) L1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) L1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) L1LossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudnnEnable := int32(0) + if cudnnEnable { ccudnnEnable = int32(1) } + lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Lcm(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLcm(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Lcm_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLcm_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLcmOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ldexp(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLdexp(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ldexp_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLdexp_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LdexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLdexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Le(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Le_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LeTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LeTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LeakyRelu(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyRelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LeakyRelu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyRelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cselfIsResult := int32(0) + if selfIsResult { cselfIsResult = int32(1) } + lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cselfIsResult := int32(0) + if selfIsResult { 
cselfIsResult = int32(1) } + lib.AtgLeakyReluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LeakyReluOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Lerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Lerp_(end *Tensor, weight *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpScalarOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LerpTensor(end *Tensor, weight *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpTensor(ptr, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LerpTensor_(end *Tensor, weight *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpTensor_(ptr, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpTensorOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Less(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLess(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Less_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLess_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LessEqual(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqual(ptr, ts.ctensor, other.cscalar) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LessEqual_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqual_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LessEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LessEqualTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqualTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LessEqualTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqualTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LessEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LessScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LessTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LessTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LessTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Lgamma(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Lgamma_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
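// NOTE: trailing-underscore methods mirror PyTorch's in-place ops: the receiver's ctensor handle is swapped for the result below instead of returning a new *Tensor. + 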
lib.AtgLgamma_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) LgammaOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCholesky(upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgLinalgCholesky(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCholeskyEx(upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.AtgLinalgCholeskyEx(ctensorPtr0, ts.ctensor, cupper, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.AtgLinalgCholeskyExL(ctensorPtr0, l.ctensor, info.ctensor, ts.ctensor, cupper, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgLinalgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCond(p *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgCond(ptr, ts.ctensor, p.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCondOut(out *Tensor, p *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgCondOut(ptr, out.ctensor, ts.ctensor, p.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCondPStr(p string, del 
bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgCondPStr(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCondPStrOut(out *Tensor, p string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgCondPStrOut(ptr, out.ctensor, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCross(other *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgCross(ptr, ts.ctensor, other.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgCrossOut(out *Tensor, other *Tensor, dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgDet(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgDet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgDetOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgDetOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgDiagonal(a *Tensor, offset int64, dim1 int64, dim2 int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgDiagonal(ptr, a.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgEig(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgLinalgEig(ctensorPtr0, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgLinalgEigOut(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return 
retVal0, retVal1, err +} + +func(ts *Tensor) LinalgEigh(uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgLinalgEigh(ctensorPtr0, ts.ctensor, uPLO) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgLinalgEighEigvals(ctensorPtr0, eigvals.ctensor, eigvecs.ctensor, ts.ctensor, uPLO) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgEigvals(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgEigvals(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgEigvalsOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgEigvalsOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgEigvalsh(uPLO string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgEigvalsh(ptr, ts.ctensor, uPLO) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgEigvalshOut(out *Tensor, uPLO string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgEigvalshOut(ptr, out.ctensor, ts.ctensor, uPLO) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgHouseholderProduct(input *Tensor, tau *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgHouseholderProduct(ptr, input.ctensor, tau.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgHouseholderProductOut(ptr, out.ctensor, input.ctensor, tau.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgInv(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgInv(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgInvEx(checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.AtgLinalgInvEx(ctensorPtr0, ts.ctensor, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgInvExInverse(inverse *Tensor, info *Tensor, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.AtgLinalgInvExInverse(ctensorPtr0, inverse.ctensor, info.ctensor, ts.ctensor, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgInvOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgInvOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgLstsq(b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + var crcondVal float64 = 0.0 + var crcondNull int = 1 + if len(rcond) > 0 { + crcondVal = rcond[0] + crcondNull = 0 + } + lib.AtgLinalgLstsq(ctensorPtr0, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func(ts *Tensor) LinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := 
(*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + + var crcondVal float64 = 0.0 + var crcondNull int = 1 + if len(rcond) > 0 { + crcondVal = rcond[0] + crcondNull = 0 + } + lib.AtgLinalgLstsqOut(ctensorPtr0, solution.ctensor, residuals.ctensor, rank.ctensor, singularValues.ctensor, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + + return retVal0, retVal1, retVal2, retVal3, err +} + +func LinalgLuFactor(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cpivot := int32(0) + if pivot { cpivot = int32(1) } + lib.AtgLinalgLuFactor(ctensorPtr0, a.ctensor, cpivot) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func LinalgLuFactorEx(a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + cpivot := int32(0) + if pivot { cpivot = int32(1) } +ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.AtgLinalgLuFactorEx(ctensorPtr0, a.ctensor, cpivot, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func LinalgLuFactorExOut(lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + cpivot := int32(0) + if pivot { cpivot = int32(1) } +ccheckErrors := int32(0) + if checkErrors { ccheckErrors = int32(1) } + lib.AtgLinalgLuFactorExOut(ctensorPtr0, lU.ctensor, pivots.ctensor, info.ctensor, a.ctensor, cpivot, ccheckErrors) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func LinalgLuFactorOut(lU *Tensor, pivots *Tensor, a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cpivot := int32(0) + if pivot { cpivot = int32(1) } + lib.AtgLinalgLuFactorOut(ctensorPtr0, lU.ctensor, pivots.ctensor, a.ctensor, 
cpivot) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgMatmul(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgMatmul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixExp(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgMatrixExp(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixPower(n int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgMatrixPower(ptr, ts.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixRank(tol float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRank(ptr, ts.ctensor, tol, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixRankAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var catolVal float64 = 0.0 + var catolNull int = 1 + if len(atol) > 0 { + catolVal = atol[0] + catolNull = 0 + } +var crtolVal float64 = 0.0 + var crtolNull int = 1 + if len(rtol) > 0 { + crtolVal = rtol[0] + crtolNull = 0 + } +chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixRankAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var catolVal float64 = 0.0 + var catolNull int = 1 + if len(atol) > 0 { + catolVal = atol[0] + catolNull = 0 + } +var crtolVal float64 = 0.0 + var 
crtolNull int = 1 + if len(rtol) > 0 { + crtolVal = rtol[0] + crtolNull = 0 + } +chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgMatrixRankAtolRtolTensor(input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankAtolRtolTensor(ptr, input.ctensor, atol.ctensor, rtol.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgMatrixRankAtolRtolTensorOut(out *Tensor, input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankAtolRtolTensorOut(ptr, out.ctensor, input.ctensor, atol.ctensor, rtol.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgMatrixRankOut(out *Tensor, tol float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankOut(ptr, out.ctensor, ts.ctensor, tol, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankOutTolTensor(ptr, out.ctensor, input.ctensor, tol.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgMatrixRankTolTensor(ptr, input.ctensor, tol.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgMultiDot(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgLinalgMultiDot(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinalgMultiDotOut(out *Tensor, tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgLinalgMultiDotOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + 
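// Illustrative sketch (not generated code): one way to drive the LinalgMultiDot binding above, assuming the MustOfSlice and MustView helpers defined elsewhere in this package. + //	a := MustOfSlice([]float64{1, 2, 3, 4}).MustView([]int64{2, 2}, true) + //	b := MustOfSlice([]float64{5, 6, 7, 8}).MustView([]int64{2, 2}, true) + //	prod, err := LinalgMultiDot([]Tensor{*a, *b}) // cost-optimal chain matmul + //	if err != nil { /* handle error */ } + //	defer prod.MustDrop() + 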
+func(ts *Tensor) LinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgLinalgNorm(ptr, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgLinalgNormOrdStr(ptr, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgLinalgNormOrdStrOut(ptr, out.ctensor, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgLinalgNormOut(ptr, out.ctensor, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinv(rcond float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinv(ptr, ts.ctensor, rcond, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var catolVal float64 = 0.0 + var catolNull int = 1 + if len(atol) > 0 { + catolVal = atol[0] + catolNull = 0 + } +var crtolVal float64 = 0.0 + var crtolNull int = 1 + if len(rtol) > 0 { + crtolVal = rtol[0] + crtolNull = 0 + } +chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var catolVal float64 = 0.0 + var catolNull int = 1 + if len(atol) > 0 { + catolVal = atol[0] + catolNull = 0 + } +var crtolVal 
float64 = 0.0 + var crtolNull int = 1 + if len(rtol) > 0 { + crtolVal = rtol[0] + crtolNull = 0 + } +chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvAtolRtolTensor(atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvAtolRtolTensor(ptr, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvAtolRtolTensorOut(out *Tensor, atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvAtolRtolTensorOut(ptr, out.ctensor, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvOut(ptr, out.ctensor, ts.ctensor, rcond, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvOutRcondTensor(ptr, out.ctensor, ts.ctensor, rcond.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chermitian := int32(0) + if hermitian { chermitian = int32(1) } + lib.AtgLinalgPinvRcondTensor(ptr, ts.ctensor, rcond.ctensor, chermitian) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) LinalgQr(mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgLinalgQr(ctensorPtr0, ts.ctensor, mode) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) LinalgQrOut(q *Tensor, r *Tensor, mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } 
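+ // NOTE: multi-result ops share one buffer: ctensorPtr1 is derived from ctensorPtr0 by pointer arithmetic, and the C call writes the q and r handles into consecutive slots.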
+func(ts *Tensor) LinalgQrOut(q *Tensor, r *Tensor, mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgLinalgQrOut(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, mode)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) LinalgSlogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgLinalgSlogdet(ctensorPtr0, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) LinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgLinalgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func LinalgSolve(input *Tensor, other *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgSolve(ptr, input.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func LinalgSolveOut(out *Tensor, input *Tensor, other *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgSolveOut(ptr, out.ctensor, input.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LinalgSolveTriangular(b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cupper := int32(0)
+ if upper { cupper = int32(1) }
+cleft := int32(0)
+ if left { cleft = int32(1) }
+cunitriangular := int32(0)
+ if unitriangular { cunitriangular = int32(1) }
+ lib.AtgLinalgSolveTriangular(ptr, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LinalgSolveTriangularOut(out *Tensor, b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cupper := int32(0)
+ if upper { cupper = int32(1) }
+cleft := int32(0)
+ if left { cleft = int32(1) }
+cunitriangular := int32(0)
+ if unitriangular { cunitriangular = int32(1) }
+ lib.AtgLinalgSolveTriangularOut(ptr, out.ctensor, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
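Multi-output ops such as `LinalgQr` follow a second convention: the C layer writes its result handles into consecutive slots starting at `ctensorPtr0`, so `ctensorPtr1` is derived by offsetting the first pointer by one pointer size rather than by a separate allocation. None of this is visible to the caller. A hedged usage sketch with the `Must` variant (matrix contents and `fmt` output are illustrative; same imports assumed as above):

```go
a := ts.MustRand([]int64{3, 3}, gotch.Float, gotch.CPU)
// "reduced" is the libtorch default mode for linalg.qr.
q, r := a.MustLinalgQr("reduced", true) // del=true drops a after the call
fmt.Printf("Q: %v, R: %v\n", q.MustSize(), r.MustSize())
q.MustDrop()
r.MustDrop()
```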
+func LinalgSvd(a *Tensor, fullMatrices bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cfullMatrices := int32(0)
+ if fullMatrices { cfullMatrices = int32(1) }
+ lib.AtgLinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cfullMatrices := int32(0)
+ if fullMatrices { cfullMatrices = int32(1) }
+ lib.AtgLinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func LinalgSvdvals(a *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgSvdvals(ptr, a.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func LinalgSvdvalsOut(out *Tensor, a *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgSvdvalsOut(ptr, out.ctensor, a.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LinalgTensorinv(ind int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgTensorinv(ptr, ts.ctensor, ind)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LinalgTensorinvOut(out *Tensor, ind int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgTensorinvOut(ptr, out.ctensor, ts.ctensor, ind)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LinalgTensorsolve(other *Tensor, dims []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgTensorsolve(ptr, ts.ctensor, other.ctensor, dims, len(dims))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
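`LinalgSvd` is package-level rather than a method because its primary input `a` is an explicit argument, not the receiver. A sketch of the three-tensor return, with shapes following torch.linalg.svd semantics (local names are illustrative):

```go
a := ts.MustRand([]int64{4, 3}, gotch.Float, gotch.CPU)
u, s, vh := ts.MustLinalgSvd(a, false) // fullMatrices=false -> reduced SVD
// For a 4x3 input in reduced mode: u is [4 3], s is [3], vh is [3 3].
u.MustDrop()
s.MustDrop()
vh.MustDrop()
a.MustDrop()
```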
+func(ts *Tensor) LinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinalgTensorsolveOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dims, len(dims))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Linear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func LinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinearOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log10(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog10(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log10_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog10_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Log10Out(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log1p(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog1p(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log1p_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog1p_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
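`Log10_` and `Log1p_` show the in-place shape of the template: no `del` flag and no returned tensor; the wrapper simply swaps the receiver's `ctensor` handle for the one returned by the in-place ATen op. A short sketch (sizes illustrative):

```go
x := ts.MustOnes([]int64{3}, gotch.Float, gotch.CPU)
x.MustLog1p_() // x now holds log(1+1) = ln 2 elementwise
x.MustDrop()
```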
+func(ts *Tensor) Log1pOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log2(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog2(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log2_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog2_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Log2Out(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Log_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLog_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogNormal_(mean float64, std float64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogNormal_(ptr, ts.ctensor, mean, std)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogSigmoid(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogSigmoid(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogSigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
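Nearly every method above threads a `del bool` through: when true, the receiver is `MustDrop`ped after the call, which lets call chains free their C-side intermediates eagerly instead of relying on Go's GC, which never sees the libtorch allocations. A sketch of the idiom:

```go
x := ts.MustRand([]int64{4}, gotch.Float, gotch.CPU)
// MustLog1p keeps x alive (del=false); MustLog then frees the
// intermediate log1p result (del=true). Only x and y remain to drop.
y := x.MustLog1p(false).MustLog(true)
y.MustDrop()
x.MustDrop()
```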
+func(ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logaddexp(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogaddexp(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logaddexp2(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogaddexp2(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogaddexp2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogaddexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logcumsumexp(dim int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogcumsumexp(ptr, ts.ctensor, dim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logdet(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogdet(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalAnd(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalAnd_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalNot(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalNot(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalNot_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalNot_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogicalNotOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalOr(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalOr_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalXor(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogicalXor_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logit(eps []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cepsVal float64 = 0.0
+ var cepsNull int = 1
+ if len(eps) > 0 {
+ cepsVal = eps[0]
+ cepsNull = 0
+ }
+ lib.AtgLogit(ptr, ts.ctensor, cepsVal, cepsNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logit_(eps []float64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cepsVal float64 = 0.0
+ var cepsNull int = 1
+ if len(eps) > 0 {
+ cepsVal = eps[0]
+ cepsNull = 0
+ }
+ lib.AtgLogit_(ptr, ts.ctensor, cepsVal, cepsNull)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LogitBackward(gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cepsVal float64 = 0.0
+ var cepsNull int = 1
+ if len(eps) > 0 {
+ cepsVal = eps[0]
+ cepsNull = 0
+ }
+ lib.AtgLogitBackward(ptr, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cepsVal float64 = 0.0
+ var cepsNull int = 1
+ if len(eps) > 0 {
+ cepsVal = eps[0]
+ cepsNull = 0
+ }
+ lib.AtgLogitBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cepsVal float64 = 0.0
+ var cepsNull int = 1
+ if len(eps) > 0 {
+ cepsVal = eps[0]
+ cepsNull = 0
+ }
+ lib.AtgLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Logsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Lstm(input *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ var chx []lib.Ctensor
+ for _, t := range hx {chx = append(chx, t.ctensor)}
+ var cparams []lib.Ctensor
+ for _, t := range params {cparams = append(cparams, t.ctensor)}
+chasBiases := int32(0)
+ if hasBiases { chasBiases = int32(1) }
+ctrain := int32(0)
+ if train { ctrain = int32(1) }
+cbidirectional := int32(0)
+ if bidirectional { cbidirectional = int32(1) }
+cbatchFirst := int32(0)
+ if batchFirst { cbatchFirst = int32(1) }
+ lib.AtgLstm(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func LstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ var chx []lib.Ctensor
+ for _, t := range hx {chx = append(chx, t.ctensor)}
+ lib.AtgLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func LstmData(data *Tensor, batchSizes *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ var chx []lib.Ctensor
+ for _, t := range hx {chx = append(chx, t.ctensor)}
+ var cparams []lib.Ctensor
+ for _, t := range params {cparams = append(cparams, t.ctensor)}
+chasBiases := int32(0)
+ if hasBiases { chasBiases = int32(1) }
+ctrain := int32(0)
+ if train { ctrain = int32(1) }
+cbidirectional := int32(0)
+ if bidirectional { cbidirectional = int32(1) }
+ lib.AtgLstmData(ctensorPtr0, data.ctensor, batchSizes.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
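`Lstm` flattens the `hx` pair and the parameter list into C arrays before the call. The sketch below is illustrative only: with `batchFirst=false`, `input` is (seq, batch, feature), `hx` holds `h0` and `c0` of shape (numLayers, batch, hidden), and `params` must already be laid out the way ATen expects (wIh, wHh, bIh, bHh per layer when `hasBiases` is true); building such flat weights by hand is uncommon outside tests. All shapes and names here are assumptions.

```go
// Hypothetical shapes: seq=5, batch=2, feature=4, hidden=8, one layer.
input := ts.MustRand([]int64{5, 2, 4}, gotch.Float, gotch.CPU)
h0 := ts.MustZeros([]int64{1, 2, 8}, gotch.Float, gotch.CPU)
c0 := ts.MustZeros([]int64{1, 2, 8}, gotch.Float, gotch.CPU)
wIh := ts.MustRand([]int64{32, 4}, gotch.Float, gotch.CPU) // [4*hidden, feature]
wHh := ts.MustRand([]int64{32, 8}, gotch.Float, gotch.CPU) // [4*hidden, hidden]
bIh := ts.MustZeros([]int64{32}, gotch.Float, gotch.CPU)
bHh := ts.MustZeros([]int64{32}, gotch.Float, gotch.CPU)
params := []ts.Tensor{*wIh, *wHh, *bIh, *bHh}
out, hn, cn := ts.MustLstm(input, []ts.Tensor{*h0, *c0}, params,
	true, 1, 0.0, false, false, false)
```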
+func(ts *Tensor) Lstsq(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgLstsq(ctensorPtr0, ts.ctensor, a.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) LstsqX(x *Tensor, qr *Tensor, a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgLstsqX(ctensorPtr0, x.ctensor, qr.ctensor, ts.ctensor, a.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) Lt(other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLt(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Lt_(other *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLt_(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LtTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLtTensor(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LtTensor_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLtTensor_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) LtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) LuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func LuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cunpackData := int32(0)
+ if unpackData { cunpackData = int32(1) }
+cunpackPivots := int32(0)
+ if unpackPivots { cunpackPivots = int32(1) }
+ lib.AtgLuUnpack(ctensorPtr0, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func LuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cunpackData := int32(0)
+ if unpackData { cunpackData = int32(1) }
+cunpackPivots := int32(0)
+ if unpackPivots { cunpackPivots = int32(1) }
+ lib.AtgLuUnpackOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func MarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaskedFill(mask *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaskedFill_(mask *Tensor, value *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) MaskedFillTensor(mask *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedFillTensor(ptr, ts.ctensor, mask.ctensor, value.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
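`MaskedFill` takes a boolean mask tensor plus a `*Scalar` fill value; `MaskedFillTensor` is the variant where the fill value is itself a tensor. A hedged sketch of the scalar form (mask construction via `MustGe` is just one convenient way to get a boolean tensor):

```go
x := ts.MustRand([]int64{5}, gotch.Float, gotch.CPU)
mask := x.MustGe(ts.FloatScalar(0.5), false) // true where x >= 0.5
y := x.MustMaskedFill(mask, ts.FloatScalar(0.0), false)
mask.MustDrop()
y.MustDrop()
x.MustDrop()
```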
+func(ts *Tensor) MaskedFillTensor_(mask *Tensor, value *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedFillTensor_(ptr, ts.ctensor, mask.ctensor, value.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) MaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaskedScatter_(mask *Tensor, source *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) MaskedSelect(mask *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func MaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedSelectBackward(ptr, grad.ctensor, input.ctensor, mask.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Matmul(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatmul(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixExp(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatrixExp(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatrixExpBackward(ptr, ts.ctensor, grad.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixH(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatrixH(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixPower(n int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatrixPower(ptr, ts.ctensor, n)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixRank(symmetric bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ csymmetric := int32(0)
+ if symmetric { csymmetric = int32(1) }
+ lib.AtgMatrixRank(ptr, ts.ctensor, csymmetric)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MatrixRankTol(tol float64, symmetric bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ csymmetric := int32(0)
+ if symmetric { csymmetric = int32(1) }
+ lib.AtgMatrixRankTol(ptr, ts.ctensor, tol, csymmetric)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Max(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMax(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgMaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) MaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgMaxDimMax(ctensorPtr0, max.ctensor, maxValues.ctensor, ts.ctensor, dim, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
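`MaxDim` is the values-plus-indices form of max: retVal0 holds the maxima along `dim` and retVal1 the argmax indices, mirroring torch.max(input, dim). A short sketch (shape is illustrative):

```go
x := ts.MustRand([]int64{2, 3}, gotch.Float, gotch.CPU)
values, indices := x.MustMaxDim(1, false, false) // both have shape [2]
values.MustDrop()
indices.MustDrop()
x.MustDrop()
```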
+func(ts *Tensor) MaxOther(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxOther(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool1dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool2dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) MaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool2dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool2dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool2dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool3dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
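The pooling wrappers pass each `[]int64` with an explicit length, so Go slices map directly onto the C (pointer, length) pairs. A typical NCHW call (sizes illustrative):

```go
x := ts.MustRand([]int64{1, 16, 28, 28}, gotch.Float, gotch.CPU)
// 2x2 window, stride 2, no padding, dilation 1, floor mode -> [1 16 14 14].
pooled := x.MustMaxPool2d([]int64{2, 2}, []int64{2, 2}, []int64{0, 0},
	[]int64{1, 1}, false, true) // del=true frees x after the call
pooled.MustDrop()
```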
+func(ts *Tensor) MaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool3dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgMaxPool3dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) MaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Maximum(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaximum(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMaximumOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Mean(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMean(ptr, ts.ctensor, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
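`Mean` here always takes a dtype because the generator maps ATen's optional ScalarType straight through `dtype.CInt()`; axis-wise reductions go through `MeanDim` with the usual dim-slice/keepdim pair. A short sketch (shapes illustrative):

```go
x := ts.MustRand([]int64{2, 3}, gotch.Float, gotch.CPU)
all := x.MustMean(gotch.Float, false)                       // scalar tensor
rows := x.MustMeanDim([]int64{1}, true, gotch.Float, false) // shape [2 1]
all.MustDrop()
rows.MustDrop()
x.MustDrop()
```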
+func(ts *Tensor) MeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgMeanDim(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Median(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMedian(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) MedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgMedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) MedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgMedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) Mh(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMh(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Min(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgMin(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) MinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgMinDimMin(ctensorPtr0, min.ctensor, minIndices.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) MinOther(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinOther(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Minimum(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinimum(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinimumOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgMiopenBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func MiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, 
retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgMiopenBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) MiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } + lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } + lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } + lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenRnn(input *Tensor, weight []Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := 
(*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0))) + + var cweight []lib.Ctensor + for _, t := range weight {cweight = append(cweight, t.ctensor)} +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } + lib.AtgMiopenRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, len(batchSizes), dropoutState.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, retVal3, retVal4, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal4 = &Tensor{ctensor: *ctensorPtr4} + + return retVal0, retVal1, retVal2, retVal3, retVal4, err +} + +func(ts *Tensor) Mish(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMish(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mish_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMish_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) MishBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMishBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MishOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMishOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnAdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnLinear(weight *Tensor, bias *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnLinear(ptr, ts.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnLinearBackwardInput(ptr, inputSize, len(inputSize), gradOutput.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cbiasDefined := int32(0) + if biasDefined { cbiasDefined = int32(1) } + lib.AtgMkldnnLinearBackwardWeights(ctensorPtr0, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } + lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } + lib.AtgMkldnnMaxPool2dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } + lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } + lib.AtgMkldnnMaxPool3dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnReorderConv3dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mm(mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mode(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgMode(ctensorPtr0, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) ModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgModeValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + 
return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Moveaxis(source []int64, destination []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMoveaxis(ptr, ts.ctensor, source, len(source), destination, len(destination)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MoveaxisInt(source int64, destination int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMoveaxisInt(ptr, ts.ctensor, source, destination) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Movedim(source []int64, destination []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMovedim(ptr, ts.ctensor, source, len(source), destination, len(destination)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MovedimInt(source int64, destination int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMovedimInt(ptr, ts.ctensor, source, destination) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MseLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Msort(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMsort(ptr, 
ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MsortOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMsortOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mt(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMt(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mul(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mul_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) MulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MulScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMulScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MulScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMulScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) MultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultilabelMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Multinomial(numSamples int64, replacement bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + creplacement := int32(0) + if replacement { creplacement = int32(1) } + lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + creplacement := int32(0) + if replacement { creplacement = int32(1) } + lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Multiply(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiply(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Multiply_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiply_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) MultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiplyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + 
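// TorchErr checks for an error recorded by the preceding C call and surfaces it + // as a Go error; at this point retVal is still nil. + 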
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultiplyScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiplyScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MultiplyScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiplyScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Mv(vec *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMv(ptr, ts.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) MvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mvlgamma(p int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgamma(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Mvlgamma_(p int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgamma_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) MvlgammaOut(out *Tensor, p int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgammaOut(ptr, out.ctensor, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanToNum(nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnanVal float64 = 0.0 + var cnanNull int = 1 + if len(nan) > 0 { + cnanVal = nan[0] + cnanNull = 0 + } +var cposinfVal float64 = 0.0 + var cposinfNull int = 1 + if len(posinf) > 0 { + cposinfVal = posinf[0] + cposinfNull = 0 + } +var cneginfVal float64 = 0.0 + var cneginfNull int = 1 + if len(neginf) > 0 { + cneginfVal = neginf[0] + cneginfNull = 0 + } + lib.AtgNanToNum(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanToNum_(nan []float64, posinf []float64, neginf []float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnanVal float64 = 0.0 + var cnanNull int = 1 + if len(nan) > 0 { + cnanVal = nan[0] + cnanNull = 0 + } +var cposinfVal float64 = 0.0 + var cposinfNull int = 1 + if len(posinf) > 0 { + cposinfVal = posinf[0] + cposinfNull = 0 + } +var cneginfVal float64 = 0.0 + var cneginfNull int = 1 + if len(neginf) 
> 0 { + cneginfVal = neginf[0] + cneginfNull = 0 + } + lib.AtgNanToNum_(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) NanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnanVal float64 = 0.0 + var cnanNull int = 1 + if len(nan) > 0 { + cnanVal = nan[0] + cnanNull = 0 + } +var cposinfVal float64 = 0.0 + var cposinfNull int = 1 + if len(posinf) > 0 { + cposinfVal = posinf[0] + cposinfNull = 0 + } +var cneginfVal float64 = 0.0 + var cneginfNull int = 1 + if len(neginf) > 0 { + cneginfVal = neginf[0] + cneginfNull = 0 + } + lib.AtgNanToNumOut(ptr, out.ctensor, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Nanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanmean(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanmeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Nanmedian(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNanmedian(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanmedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanmedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) NanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanmedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, 
retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Nanquantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanquantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanquantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanquantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanquantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNanquantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Nansum(dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNansum(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NansumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNansumDimIntlist(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NansumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if 
del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNansumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Narrow(dim int64, start int64, length int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrow(ptr, ts.ctensor, dim, start, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NarrowCopy(dim int64, start int64, length int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrowCopyOut(ptr, out.ctensor, ts.ctensor, dim, start, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NarrowTensor(dim int64, start *Tensor, length int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrowTensor(ptr, ts.ctensor, dim, start.ctensor, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgNativeBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func NativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgNativeBatchNormOut(ctensorPtr0, 
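+ // the three result handles are written starting at ctensorPtr0; the destination + // tensors (out, saveMean, saveInvstd) precede the input in the C argument order + // used throughout this file. + 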
out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) NativeChannelShuffle(groups int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNativeChannelShuffle(ptr, ts.ctensor, groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NativeDropout(input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } + lib.AtgNativeDropout(ctensorPtr0, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func NativeDropoutBackward(gradOutput *Tensor, mask *Tensor, scale float64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNativeDropoutBackward(ptr, gradOutput.ctensor, mask.ctensor, scale) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgNativeGroupNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func NativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgNativeLayerNorm(ctensorPtr0, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) NativeNorm(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNativeNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNativeNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ne(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Ne_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) NeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NeTensor(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeTensor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NeTensor_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeTensor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) NeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Neg(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeg(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Neg_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeg_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) NegOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Negative(del bool)(retVal *Tensor, err 
error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegative(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Negative_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegative_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) NegativeOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegativeOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewEmpty(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewEmptyStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewFull(ptr, ts.ctensor, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewOnes(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewZeros(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Nextafter(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNextafter(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Nextafter_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) 
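+ // In-place variants (method names ending in "_") follow this pattern: the C call + // hands back a fresh handle through ptr and the receiver's ctensor is replaced by + // it, so only err is returned, e.g. err := x.Nextafter_(y) (illustrative).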
+ + lib.AtgNextafter_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) NextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNextafterOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) 
NllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossNd(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Nonzero(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNonzero(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NonzeroOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Norm(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNormDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) NormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgNormOut(ptr, out.ctensor, 
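+ // out.ctensor (the destination) precedes the receiver; dim travels as a + // slice-plus-length pair and keepdim as the int32 flag computed above. + 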
+func(ts *Tensor) NormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNormScalaroptDim(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNormScalaroptDtype(ptr, ts.ctensor, p.cscalar, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Normal(out *Tensor, mean *Tensor, std float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNormal(ptr, out.ctensor, mean.ctensor, std)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Normal_(mean float64, std float64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNormal_(ptr, ts.ctensor, mean, std)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func NormalFloatFloatOut(out *Tensor, mean float64, std float64, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNormalFloatFloatOut(ptr, out.ctensor, mean, std, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func NormalFloatTensorOut(out *Tensor, mean float64, std *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNormalFloatTensorOut(ptr, out.ctensor, mean, std.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func NormalTensorTensorOut(out *Tensor, mean *Tensor, std *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNormalTensorTensorOut(ptr, out.ctensor, mean.ctensor, std.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NotEqual(other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNotEqual(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
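+// Editor's note (sketch): the Norm* family above mirrors C++ overloads that Go
+// cannot express directly, so each overload gets a suffixed name: Norm
+// (default Frobenius norm), NormScalaroptDim (given p over dims),
+// NormScalaroptDimDtype (plus an output dtype), and NormOut/NormDtypeOut
+// (writing into an existing `out` tensor). E.g., with p a *Scalar holding 2,
+// a 2-norm over dim 1 keeping dims:
+//
+//	n, err := x.NormScalaroptDim(p, []int64{1}, true, false)
+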
+func(ts *Tensor) NotEqual_(other *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNotEqual_(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) NotEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNotEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NotEqualTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNotEqualTensor(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NotEqualTensor_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNotEqualTensor_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) NotEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNotEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NuclearNorm(keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NuclearNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNuclearNormDim(ptr, ts.ctensor, dim, len(dim), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNuclearNormDimOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) NuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
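+// Editor's note (sketch): these signatures use no C bool type, so Go bools are
+// lowered to int32 flags immediately before each cgo call; the pattern
+//
+//	ckeepdim := int32(0)
+//	if keepdim { ckeepdim = int32(1) }
+//
+// recurs throughout this file for keepdim, ceilMode, train, accumulate, etc.
+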
+func(ts *Tensor) NumpyT(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgNumpyT(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) OneHot(numClasses int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOneHot(ptr, ts.ctensor, numClasses)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOnes(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) OnesLike(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOnesLike(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func OnesOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOnesOut(ptr, out.ctensor, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Orgqr(input2 *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) OrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Ormqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cleft := int32(0)
+ if left { cleft = int32(1) }
+ ctranspose := int32(0)
+ if transpose { ctranspose = int32(1) }
+ lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) OrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cleft := int32(0)
+ if left { cleft = int32(1) }
+ ctranspose := int32(0)
+ if transpose { ctranspose = int32(1) }
+ lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Outer(vec2 *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOuter(ptr, ts.ctensor, vec2.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
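+// Editor's note (sketch): package-level constructors such as Ones take the
+// dtype/device pair explicitly in place of a C++ TensorOptions struct:
+//
+//	ones, err := Ones([]int64{2, 3}, gotch.Float, gotch.CPU)
+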
+func(ts *Tensor) OuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgOuterOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) OutputNr(del bool)(retVal int64, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgOutputNr(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func PadSequence(sequences []Tensor, batchFirst bool, paddingValue float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var csequences []lib.Ctensor
+ for _, t := range sequences {csequences = append(csequences, t.ctensor)}
+ cbatchFirst := int32(0)
+ if batchFirst { cbatchFirst = int32(1) }
+ lib.AtgPadSequence(ptr, csequences, len(csequences), cbatchFirst, paddingValue)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Pdist(p float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPdist(ptr, ts.ctensor, p)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Permute(dims []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPermute(ptr, ts.ctensor, dims, len(dims))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) PinMemory(device gotch.Device, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPinMemory(ptr, ts.ctensor, device.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Pinverse(rcond float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPinverse(ptr, ts.ctensor, rcond)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) PixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
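+// Editor's note (sketch): functions taking []Tensor, like PadSequence above,
+// flatten the slice into a []lib.Ctensor plus an explicit length for the C
+// side. With hypothetical 1-D tensors a and b of lengths 3 and 5:
+//
+//	padded, err := PadSequence([]Tensor{*a, *b}, true, 0.0) // -> shape [2, 5]
+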
+func(ts *Tensor) PixelUnshuffle(downscaleFactor int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPixelUnshuffle(ptr, ts.ctensor, downscaleFactor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Poisson(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPoisson(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func PoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ clogInput := int32(0)
+ if logInput { clogInput = int32(1) }
+ cfull := int32(0)
+ if full { cfull = int32(1) }
+ lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Polar(abs *Tensor, angle *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPolar(ptr, abs.ctensor, angle.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func PolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPolarOut(ptr, out.ctensor, abs.ctensor, angle.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Polygamma(n int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPolygamma(ptr, n, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Polygamma_(n int64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPolygamma_(ptr, ts.ctensor, n)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) PolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Positive(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPositive(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Pow(exponent *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPow(ptr, ts.ctensor, exponent.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Pow_(exponent *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func PowScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPowScalar(ptr, selfScalar.cscalar, exponent.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func PowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPowScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) PowTensor_(exponent *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPowTensor_(ptr, ts.ctensor, exponent.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) PowTensorScalar(exponent *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPowTensorScalar(ptr, ts.ctensor, exponent.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) PowTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPowTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) PowTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPowTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Prelu(weight *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) PreluBackward(gradOutput *Tensor, weight *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ lib.AtgPreluBackward(ctensorPtr0, gradOutput.ctensor, ts.ctensor, weight.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) Prod(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgProd(ptr, ts.ctensor, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
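+// Editor's note (sketch): Pow is split by operand type: Pow (tensor exponent),
+// PowTensorScalar (scalar exponent), PowScalar (scalar base), plus the
+// in-place `_` and `Out` forms. Squaring a hypothetical tensor x in place,
+// with FloatScalar assumed to be this package's Scalar constructor:
+//
+//	two := FloatScalar(2.0)
+//	err := x.Pow_(two)
+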
+func(ts *Tensor) ProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgProdDimInt(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgProdIntOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Put(index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ caccumulate := int32(0)
+ if accumulate { caccumulate = int32(1) }
+ lib.AtgPut(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Put_(index *Tensor, source *Tensor, accumulate bool)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ caccumulate := int32(0)
+ if accumulate { caccumulate = int32(1) }
+ lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) QPerChannelAxis(del bool)(retVal int64, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgQPerChannelAxis(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) QPerChannelScales(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQPerChannelScales(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QPerChannelZeroPoints(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QScale(del bool)(retVal float64, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgQScale(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) QZeroPoint(del bool)(retVal int64, err error) {
+ if del { defer ts.MustDrop() }
+
+ retVal = lib.AtgQZeroPoint(ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ return retVal, err
+}
+
+func(ts *Tensor) Qr(some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ csome := int32(0)
+ if some { csome = int32(1) }
+ lib.AtgQr(ctensorPtr0, ts.ctensor, csome)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
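+// Editor's note (sketch): multi-output ops return their tensors through one C
+// array; ctensorPtr1 is derived from ctensorPtr0 by pointer arithmetic, so the
+// C side is expected to write the outputs contiguously. Usage with a
+// hypothetical matrix x:
+//
+//	q, r, err := x.Qr(true, false) // reduced QR; q and r each need dropping
+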
+func(ts *Tensor) QrQ(q *Tensor, r *Tensor, some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ csome := int32(0)
+ if some { csome = int32(1) }
+ lib.AtgQrQ(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, csome)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) Quantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgQuantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgQuantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgQuantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgQuantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
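+// Editor's note (sketch): optional scalar arguments (ATen `int64?`) surface as
+// []int64 slices: an empty or nil slice means None and is lowered to a
+// (value, null-flag) pair, as in Quantile's cdimVal/cdimNull above. Median
+// over all elements of a hypothetical tensor x:
+//
+//	m, err := x.QuantileScalar(0.5, nil, false, "linear", false)
+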
+func(ts *Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantizePerTensorDynamic(dtype gotch.DType, reduceRange bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ creduceRange := int32(0)
+ if reduceRange { creduceRange = int32(1) }
+ lib.AtgQuantizePerTensorDynamic(ptr, ts.ctensor, dtype.CInt(), creduceRange)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizePerTensorTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func QuantizedLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ var chx []lib.Ctensor
+ for _, t := range hx {chx = append(chx, t.ctensor)}
+ lib.AtgQuantizedLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
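+// Editor's note (sketch): quantizing a hypothetical float tensor x with an
+// affine scale/zero-point and reading the qparams back; this assumes a
+// quantized dtype constant (e.g. gotch.QInt8) is available in the gotch
+// package:
+//
+//	qx, err := x.QuantizePerTensor(0.1, 10, gotch.QInt8, false)
+//	scale, err := qx.QScale(false)
+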
+func(ts *Tensor) QuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgQuantizedMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cceilMode := int32(0)
+ if ceilMode { cceilMode = int32(1) }
+ lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func QuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Rad2deg(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRad2deg(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Rad2deg_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRad2deg_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Rad2degOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRad2degOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRand(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RandLike(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandLike(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RandOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandOut(ptr, out.ctensor, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandint(ptr, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RandintLike(high int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandintLike(ptr, ts.ctensor, high)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RandintLikeLowDtype(low int64, high int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandintLikeLowDtype(ptr, ts.ctensor, low, high)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandintLow(ptr, low, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RandintLowOut(out *Tensor, low int64, high int64, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandintLowOut(ptr, out.ctensor, low, high, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandintOut(ptr, out.ctensor, high, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandn(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
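+// Editor's note (sketch): these constructors mirror torch.rand/randn/randint;
+// RandintLow carries the (low, high) pair in its name because Go has no
+// default arguments:
+//
+//	noise, err := Randn([]int64{8, 16}, gotch.Float, gotch.CPU)
+//	dice, err := RandintLow(1, 7, []int64{10}, gotch.Int64, gotch.CPU)
+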
+func(ts *Tensor) RandnLike(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandnLike(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RandnOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandnOut(ptr, out.ctensor, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Random_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandom_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) RandomFrom_(from int64, to []int64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var ctoVal int64 = 0
+ var ctoNull int = 1
+ if len(to) > 0 {
+ ctoVal = to[0]
+ ctoNull = 0
+ }
+ lib.AtgRandomFrom_(ptr, ts.ctensor, from, ctoVal, ctoNull)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) RandomTo_(to int64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandomTo_(ptr, ts.ctensor, to)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RandpermOut(out *Tensor, n int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRandpermOut(ptr, out.ctensor, n)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Range(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRangeStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Ravel(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRavel(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
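+// Editor's note (sketch): trailing-underscore methods are in-place: they take
+// no `del` flag, mutate the receiver, and swap ts.ctensor to the result. With
+// a hypothetical int tensor x:
+//
+//	err := x.RandomFrom_(0, []int64{100}) // uniform ints in [0, 100);
+//	                                      // an empty `to` slice means unbounded
+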
+func(ts *Tensor) Real(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReal(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Reciprocal(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReciprocal(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Reciprocal_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReciprocal_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) ReciprocalOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad1d(padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad2d(padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad3d(padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad3d(ptr, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReflectionPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReflectionPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Relu(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRelu(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Relu6(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRelu6(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Relu6_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRelu6_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Relu_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRelu_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Remainder(other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainder(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Remainder_(other *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) RemainderScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainderScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RemainderScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainderScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RemainderTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainderTensor(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RemainderTensor_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainderTensor_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) RemainderTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRemainderTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Renorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Renorm_(p *Scalar, dim int64, maxnorm *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) RenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
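+// Editor's note (sketch): like NotEqual and Pow, Remainder is split by operand
+// type: Remainder (*Scalar), RemainderTensor (*Tensor), RemainderScalarTensor
+// (scalar base), each with `_` / `Out` forms. Elementwise x mod 3, with
+// FloatScalar assumed to be this package's Scalar constructor:
+//
+//	r, err := x.Remainder(FloatScalar(3.0), false)
+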
+func(ts *Tensor) Repeat(repeats []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgRepeat(ptr, ts.ctensor, repeats, len(repeats))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func RepeatInterleave(repeats *Tensor, outputSize []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var coutputSizeVal int64 = 0
+ var coutputSizeNull int = 1
+ if len(outputSize) > 0 {
+ coutputSizeVal = outputSize[0]
+ coutputSizeNull = 0
+ }
+ lib.AtgRepeatInterleave(ptr, repeats.ctensor, coutputSizeVal, coutputSizeNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ var coutputSizeVal int64 = 0
+ var coutputSizeNull int = 1
+ if len(outputSize) > 0 {
+ coutputSizeVal = outputSize[0]
+ coutputSizeNull = 0
+ }
+ lib.AtgRepeatInterleaveSelfInt(ptr, ts.ctensor, repeats, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) RepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ var coutputSizeVal int64 = 0
+ var coutputSizeNull int = 1
+ if len(outputSize) > 0 {
+ coutputSizeVal = outputSize[0]
+ coutputSizeNull = 0
+ }
+ lib.AtgRepeatInterleaveSelfTensor(ptr, ts.ctensor, repeats.ctensor, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad1d(padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad2d(padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad3d(padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ReplicationPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + crequiresGrad := int32(0) + if requiresGrad { crequiresGrad = int32(1) } + lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Reshape(shape []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReshape(ptr, ts.ctensor, shape, len(shape)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ReshapeAs(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Resize_(size []int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResize_(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ResizeAs_(theTemplate *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ResizeAsSparse_(theTemplate *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResizeAsSparse_(ptr, ts.ctensor, theTemplate.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ResolveConj(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResolveConj(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ResolveNeg(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResolveNeg(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) RetainsGrad(del bool)(retVal bool, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgRetainsGrad(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func RnnRelu(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cparams []lib.Ctensor + for _, t := range params {cparams = append(cparams, t.ctensor)} +chasBiases := int32(0) + if hasBiases { chasBiases = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } + lib.AtgRnnRelu(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) + if err = TorchErr(); err != nil { + return 
retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func RnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cparams []lib.Ctensor + for _, t := range params {cparams = append(cparams, t.ctensor)} +chasBiases := int32(0) + if hasBiases { chasBiases = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } + lib.AtgRnnReluData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func RnnTanh(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cparams []lib.Ctensor + for _, t := range params {cparams = append(cparams, t.ctensor)} +chasBiases := int32(0) + if hasBiases { chasBiases = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } + lib.AtgRnnTanh(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func RnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var cparams []lib.Ctensor + for _, t := range params {cparams = append(cparams, 
t.ctensor)} +chasBiases := int32(0) + if hasBiases { chasBiases = int32(1) } +ctrain := int32(0) + if train { ctrain = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } + lib.AtgRnnTanhData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Roll(shifts []int64, dims []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoll(ptr, ts.ctensor, shifts, len(shifts), dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Rot90(k int64, dims []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRot90(ptr, ts.ctensor, k, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Round(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRound(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Round_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRound_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) RoundDecimals(decimals int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoundDecimals(ptr, ts.ctensor, decimals) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) RoundDecimals_(decimals int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoundDecimals_(ptr, ts.ctensor, decimals) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) RoundDecimalsOut(out *Tensor, decimals int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoundDecimalsOut(ptr, out.ctensor, ts.ctensor, decimals) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) RoundOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RowStack(tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgRowStack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RowStackOut(out *Tensor, 
tensors []Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgRowStackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Rrelu(training bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgRrelu(ptr, ts.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Rrelu_(training bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgRrelu_(ptr, ts.ctensor, ctraining) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) RreluWithNoise(noise *Tensor, training bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) RreluWithNoise_(noise *Tensor, training bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) RreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +cselfIsResult := int32(0) + if selfIsResult { cselfIsResult = int32(1) } + lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) RreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } + lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Rsqrt(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrt(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Rsqrt_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrt_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err 
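+ // In-place variants (trailing "_") overwrite ts.ctensor with the handle the
+ // C shim wrote back, rather than wrapping it in a new Tensor; compare
+ // Rsqrt(del bool) above, which returns a fresh *Tensor.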
+} + +func(ts *Tensor) RsqrtOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Rsub(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsub(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) RsubScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsubScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Scatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Scatter_(dim int64, index *Tensor, src *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterReduce(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterReduce_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterSrcOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterValue(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterValue(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterValue_(dim int64, index *Tensor, value *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterValue_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterValueOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterValueReduce(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterValueReduce_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterValueReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) + if err = TorchErr(); err != nil { + return retVal, err 
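+ // Every Atg* call is followed by TorchErr(), which turns a pending C++
+ // exception from libtorch into a Go error; on failure the wrapper returns
+ // the zero-valued retVal alongside that error.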
+ } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Searchsorted(sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +cright := int32(0) + if right { cright = int32(1) } + lib.AtgSearchsorted(ptr, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +cright := int32(0) + if right { cright = int32(1) } + lib.AtgSearchsortedScalar(ptr, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright, side, sorter.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { coutInt32 = int32(1) } +cright := int32(0) + if right { cright = int32(1) } + lib.AtgSearchsortedTensorOut(ptr, out.ctensor, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunsafety := int32(0) + if unsafety { cunsafety = int32(1) } + lib.AtgSegmentReduce(ptr, data.ctensor, reduce, lengths.ctensor, indices.ctensor, axis, cunsafety, initial.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Select(dim int64, index int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelect(ptr, ts.ctensor, dim, index) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelectBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), dim, index) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SelectScatter(src *Tensor, dim int64, index int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelectScatter(ptr, ts.ctensor, src.ctensor, dim, index) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Selu(del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Selu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Set_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSet_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SetRequiresGrad(r bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cr := int32(0) + if r { cr = int32(1) } + lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SetSourceTensor_(source *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSetSourceTensor_(ptr, ts.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Sgn(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSgn(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sgn_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSgn_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SgnOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSgnOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sigmoid(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sigmoid_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoid_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func SigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := 
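+ // *Out variants write into a caller-provided destination: the shim receives
+ // out.ctensor ahead of the input tensor, mirroring ATen's out= overloads.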
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sign(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSign(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sign_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSign_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SignOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Signbit(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignbit(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SignbitOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignbitOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Silu(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSilu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Silu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSilu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSiluBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSiluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SiluOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSiluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sin(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSin(ptr, ts.ctensor) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sin_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SinOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sinc(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sinc_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinc_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SincOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSincOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sinh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sinh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SinhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Slice(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cstartVal int64 = 0 + var cstartNull int = 1 + if len(start) > 0 { + cstartVal = start[0] + cstartNull = 0 + } +var cendVal int64 = 0 + var cendNull int = 1 + if len(end) > 0 { + cendVal = end[0] + cendNull = 0 + } + lib.AtgSlice(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSliceBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), dim, start, end, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SliceScatter(src *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err 
error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cstartVal int64 = 0 + var cstartNull int = 1 + if len(start) > 0 { + cstartVal = start[0] + cstartNull = 0 + } +var cendVal int64 = 0 + var cendNull int = 1 + if len(end) > 0 { + cendVal = end[0] + cendNull = 0 + } + lib.AtgSliceScatter(ptr, ts.ctensor, src.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Slogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgSlogdet(ctensorPtr0, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) SlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose2d(ptr, 
ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Smm(mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction, beta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del 
bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, beta) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Softmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Softplus(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplus(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar) + if err = TorchErr(); err != 
nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftplusOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Softshrink(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrink(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SoftshrinkOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Solve(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgSolve(ctensorPtr0, ts.ctensor, a.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) SolveSolution(solution *Tensor, lu *Tensor, a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + lib.AtgSolveSolution(ctensorPtr0, solution.ctensor, lu.ctensor, ts.ctensor, a.ctensor) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 
= &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Sort(dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cdescending := int32(0) + if descending { cdescending = int32(1) } + lib.AtgSort(ctensorPtr0, ts.ctensor, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) SortStable(stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cstable := int32(0) + if stable { cstable = int32(1) } +cdescending := int32(0) + if descending { cdescending = int32(1) } + lib.AtgSortStable(ctensorPtr0, ts.ctensor, cstable, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) SortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cdescending := int32(0) + if descending { cdescending = int32(1) } + lib.AtgSortValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) SortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cstable := int32(0) + if stable { cstable = int32(1) } +cdescending := int32(0) + if descending { cdescending = int32(1) } + lib.AtgSortValuesStable(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, cstable, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
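+ // Receiver-less wrappers such as this one (also RowStack, RnnRelu above) are
+ // package-level constructors; tensor options travel as a gotch.DType and
+ // gotch.Device pair, lowered to C ints via CInt(). Pair-returning wrappers
+ // (Sort, Slogdet above) instead reserve two consecutive slots, ctensorPtr1
+ // sitting unsafe.Sizeof(ctensorPtr0) bytes past ctensorPtr0.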
lib.AtgSparseCooTensorIndices(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensorIndicesSize(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SparseDim(del bool)(retVal int64, err error) { + if del { defer ts.MustDrop() } + + retVal = lib.AtgSparseDim(ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + return retVal, err +} + +func(ts *Tensor) SparseMask(mask *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseResize_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SparseSampledAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseSampledAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SparseSampledAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
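+ // The trailing del flag is the file's ownership convention: passing true
+ // schedules ts.MustDrop(), freeing the receiver's C-side storage once the
+ // call returns. Illustrative sketch: y, err := x.Rsqrt(true) consumes x,
+ // while x.Rsqrt(false) leaves x alive for further use.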
lib.AtgSparseSampledAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialDigamma(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialDigamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialDigammaOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialDigammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialEntr(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialEntr(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialEntrOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialEntrOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErf(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErfOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErfc(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErfcOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfcOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErfcx(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfcx(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErfcxOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfcxOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, 
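+ // Special* wrappers expose ATen's special-function namespace; each comes
+ // paired with an *Out variant over a caller-supplied destination.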
err +} + +func(ts *Tensor) SpecialErfinv(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfinv(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialErfinvOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialErfinvOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialExp2(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialExp2(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialExp2Out(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialExp2Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialExpit(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialExpit(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialExpitOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialExpitOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialExpm1(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialExpm1(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialExpm1Out(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialExpm1Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialGammainc(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialGammainc(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialGammaincOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialGammaincOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialGammaincc(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialGammaincc(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialGammainccOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialGammainccOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialGammaln(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialGammaln(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialGammalnOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialGammalnOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI0(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI0(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI0Out(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI0Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI0e(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI0e(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI0eOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI0eOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI1(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI1(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI1Out(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI1Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI1e(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI1e(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, 
err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialI1eOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialI1eOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLog1p(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialLog1p(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLog1pOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialLog1pOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLogit(eps []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgSpecialLogit(ptr, ts.ctensor, cepsVal, cepsNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgSpecialLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgSpecialLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgSpecialLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialMultigammaln(p int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialMultigammaln(ptr, 
ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialMultigammalnOut(out *Tensor, p int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialMultigammalnOut(ptr, out.ctensor, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialNdtr(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialNdtr(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialNdtrOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialNdtrOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialNdtri(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialNdtri(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialNdtriOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialNdtriOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialPolygamma(n int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialPolygamma(ptr, n, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialPolygammaOut(ptr, out.ctensor, n, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialPsi(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialPsi(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialPsiOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialPsiOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialRound(decimals int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialRound(ptr, ts.ctensor, decimals) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + 
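+ // Optional scalar arguments (e.g. eps in SpecialLogit above) are encoded as
+ // slices: nil or an empty slice selects the C-side null/default, while a
+ // one-element slice supplies the value. A usage sketch (x stands in for an
+ // existing *Tensor):
+ //
+ //	y, err := x.SpecialLogit([]float64{1e-6}, false) // eps = 1e-6
+ //	z, err := x.SpecialLogit(nil, false)             // eps = null (default)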
return retVal, err +} + +func(ts *Tensor) SpecialRoundOut(out *Tensor, decimals int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialRoundOut(ptr, out.ctensor, ts.ctensor, decimals) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialSinc(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialSinc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialSincOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialSincOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlog1py(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlog1py(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlog1pyOtherScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlog1pyOtherScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlog1pyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlog1pyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlog1pyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlog1pySelfScalar(ptr, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlog1pySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, 
other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlogy(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlogy(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlogyOtherScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlogyOtherScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlogyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialXlogyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlogyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlogySelfScalar(ptr, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialXlogySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialZeta(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialZeta(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialZetaOtherScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialZetaOtherScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialZetaOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SpecialZetaOut(out *Tensor, other *Tensor, del 
bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialZetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialZetaSelfScalar(ptr, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSpecialZetaSelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sqrt(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrt(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sqrt_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrt_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SqrtOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Square(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquare(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Square_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquare_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SquareOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquareOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Squeeze(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Squeeze_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SqueezeDim(dim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueezeDim(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SqueezeDim_(dim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueezeDim_(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Sspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Stack(tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgStack(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func StackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} + lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Std(unbiased bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } + lib.AtgStd(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) StdCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ccorrectionVal int64 = 0 + var ccorrectionNull int = 1 + if len(correction) > 0 { + ccorrectionVal = correction[0] + ccorrectionNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgStdCorrection(ptr, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) StdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ccorrectionVal int64 = 0 + var ccorrectionNull int = 1 + if len(correction) > 0 { + ccorrectionVal = correction[0] + ccorrectionNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgStdCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) StdDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgStdDim(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) StdMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } + lib.AtgStdMean(ctensorPtr0, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) StdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + var ccorrectionVal int64 = 0 + var ccorrectionNull int = 1 + if len(correction) > 0 { + ccorrectionVal = correction[0] + ccorrectionNull = 0 + } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgStdMeanCorrection(ctensorPtr0, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) StdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgStdMeanDim(ctensorPtr0, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) StdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Stft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var chopLengthVal int64 = 0 + var chopLengthNull int = 1 + if len(hopLength) > 0 { + chopLengthVal = hopLength[0] + chopLengthNull = 0 + } +var cwinLengthVal int64 = 0 + var cwinLengthNull int = 1 + if len(winLength) > 0 { + cwinLengthVal = winLength[0] + cwinLengthNull = 0 + } +cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +conesided := int32(0) + if onesided { conesided = int32(1) } +creturnComplex := int32(0) + if returnComplex { creturnComplex = int32(1) } + lib.AtgStft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, cnormalized, conesided, creturnComplex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sub(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Sub_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SubScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubScalar(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SubScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Subtract(other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtract(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Subtract_(other *Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtract_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) SubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtractOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SubtractScalar(other *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtractScalar(ptr, ts.ctensor, 
other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SubtractScalar_(other *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtractScalar_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Sum(dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSum(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgSumDimIntlist(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } + lib.AtgSumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) SumToSize(size []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSumToSize(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Svd(some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + csome := int32(0) + if some { csome = int32(1) } +ccomputeUv := int32(0) + if computeUv { ccomputeUv = int32(1) } + lib.AtgSvd(ctensorPtr0, ts.ctensor, csome, ccomputeUv) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) SvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0))) + + csome := int32(0) + if some { csome = int32(1) } +ccomputeUv := int32(0) + if computeUv { ccomputeUv = int32(1) } + lib.AtgSvdU(ctensorPtr0, 
u.ctensor, s.ctensor, v.ctensor, ts.ctensor, csome, ccomputeUv) + if err = TorchErr(); err != nil { + return retVal0, retVal1, retVal2, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal2 = &Tensor{ctensor: *ctensorPtr2} + + return retVal0, retVal1, retVal2, err +} + +func(ts *Tensor) Swapaxes(axis0 int64, axis1 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSwapaxes(ptr, ts.ctensor, axis0, axis1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Swapaxes_(axis0 int64, axis1 int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSwapaxes_(ptr, ts.ctensor, axis0, axis1) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Swapdims(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSwapdims(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Swapdims_(dim0 int64, dim1 int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSwapdims_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Symeig(eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ceigenvectors := int32(0) + if eigenvectors { ceigenvectors = int32(1) } +cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgSymeig(ctensorPtr0, ts.ctensor, ceigenvectors, cupper) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) SymeigE(e *Tensor, v *Tensor, eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + ceigenvectors := int32(0) + if eigenvectors { ceigenvectors = int32(1) } +cupper := int32(0) + if upper { cupper = int32(1) } + lib.AtgSymeigE(ctensorPtr0, e.ctensor, v.ctensor, ts.ctensor, ceigenvectors, cupper) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) T(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgT(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) T_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgT_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) Take(index 
*Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTake(ptr, ts.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) TakeAlongDim(indices *Tensor, dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgTakeAlongDim(ptr, ts.ctensor, indices.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) TakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgTakeAlongDimOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Tan(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Tan_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) TanOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Tanh(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Tanh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func TanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func TanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
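+ // Multi-output ops (StdMean and Svd above, Topk below) pass a single slot
+ // pointer and read the extra result tensors from adjacent slots via unsafe
+ // pointer arithmetic (ctensorPtr0, ctensorPtr1, ...). The trailing del flag,
+ // when true, defers MustDrop() so the receiver's C tensor is freed once the
+ // call returns. A sketch (x stands in for an existing *Tensor):
+ //
+ //	values, indices, err := x.Topk(5, -1, true, true, false)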
lib.AtgTanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) TanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) TensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTensordotOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Threshold(threshold *Scalar, value *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Threshold_(threshold *Scalar, value *Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func(ts *Tensor) ThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, threshold.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Tile(dims []int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTile(ptr, ts.ctensor, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) To(device gotch.Device, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTo(ptr, ts.ctensor, device.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToDense(dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToDense(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ToDenseBackward(grad *Tensor, input *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } + lib.AtgToDevice(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } + lib.AtgToDtype(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } + lib.AtgToDtypeLayout(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToMkldnn(dtype gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToMkldnn(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) 
ToOther(other *Tensor, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } + lib.AtgToOther(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToSparse(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToSparse(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) ToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToSparseSparseDim(ptr, ts.ctensor, sparseDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Topk(k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + clargest := int32(0) + if largest { clargest = int32(1) } +csorted := int32(0) + if sorted { csorted = int32(1) } + lib.AtgTopk(ctensorPtr0, ts.ctensor, k, dim, clargest, csorted) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) TopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + clargest := int32(0) + if largest { clargest = int32(1) } +csorted := int32(0) + if sorted { csorted = int32(1) } + lib.AtgTopkValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, clargest, csorted) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) Totype(scalarType gotch.DType, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Trace(del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrace(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func TraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTraceBackward(ptr, grad.ctensor, sizes, len(sizes)) + if err = TorchErr(); err != 
nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) { + if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) Transpose_(dim0 int64, dim1 int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + ts.ctensor = *ptr + + return err +} + +func Trapezoid(y *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapezoid(ptr, y.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func TrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapezoidX(ptr, y.ctensor, x.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Trapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func TrapzDx(y *Tensor, dx float64, dim int64)(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapzDx(ptr, y.ctensor, dx, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts *Tensor) TriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +ctranspose := int32(0) + if transpose { ctranspose = int32(1) } +cunitriangular := int32(0) + if unitriangular { cunitriangular = int32(1) } + lib.AtgTriangularSolve(ctensorPtr0, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: *ctensorPtr0} + retVal1 = &Tensor{ctensor: *ctensorPtr1} + + return retVal0, retVal1, err +} + +func(ts *Tensor) TriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { + if del { defer ts.MustDrop() } + ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +ctranspose := int32(0) + if transpose { ctranspose = int32(1) } +cunitriangular := int32(0) + if unitriangular { cunitriangular = int32(1) } + lib.AtgTriangularSolveX(ctensorPtr0, x.ctensor, m.ctensor, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular) + if err = TorchErr(); err != nil { + return retVal0, retVal1, err + } + retVal0 = &Tensor{ctensor: 
+func(ts *Tensor) TriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cupper := int32(0)
+ if upper { cupper = int32(1) }
+ctranspose := int32(0)
+ if transpose { ctranspose = int32(1) }
+cunitriangular := int32(0)
+ if unitriangular { cunitriangular = int32(1) }
+ lib.AtgTriangularSolveX(ctensorPtr0, x.ctensor, m.ctensor, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) Tril(diagonal int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTril(ptr, ts.ctensor, diagonal)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Tril_(diagonal int64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTril_(ptr, ts.ctensor, diagonal)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func TripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cswap := int32(0)
+ if swap { cswap = int32(1) }
+ lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Triu(diagonal int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTriu(ptr, ts.ctensor, diagonal)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Triu_(diagonal int64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTriu_(ptr, ts.ctensor, diagonal)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TrueDivide(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TrueDivide_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) TrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TrueDivideScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrueDivideScalar(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TrueDivideScalar_(other *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrueDivideScalar_(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) Trunc(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrunc(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Trunc_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTrunc_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) TruncOut(out *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) TypeAs(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Unflatten(dim int64, sizes []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgUnflatten(ptr, ts.ctensor, dim, sizes, len(sizes))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
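Methods with a trailing underscore (Transpose_, Tril_, Trunc_, TrueDivide_, ...) are the in-place variants: they return only an error and repoint the receiver's ctensor at the result, so there is no del flag. A minimal sketch with Trunc_, under the same import-path assumptions as the earlier example:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch/ts"
    )

    func main() {
        xs, err := ts.OfSlice([]float64{1.7, -2.3})
        if err != nil {
            panic(err)
        }
        // Trunc_ mutates xs in place (truncation toward zero); only an error comes back.
        if err := xs.Trunc_(); err != nil {
            panic(err)
        }
        fmt.Println(xs.Float64Values()) // [1 -2]
        xs.MustDrop()
    }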
+func UnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgUnfoldBackward(ptr, gradIn.ctensor, inputSizes, len(inputSizes), dim, size, step)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Uniform_(from float64, to float64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgUniform_(ptr, ts.ctensor, from, to)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) UniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ creturnInverse := int32(0)
+ if returnInverse { creturnInverse = int32(1) }
+creturnCounts := int32(0)
+ if returnCounts { creturnCounts = int32(1) }
+var cdimVal int64 = 0
+ var cdimNull int = 1
+ if len(dim) > 0 {
+ cdimVal = dim[0]
+ cdimNull = 0
+ }
+ lib.AtgUniqueConsecutive(ctensorPtr0, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func(ts *Tensor) UniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ csorted := int32(0)
+ if sorted { csorted = int32(1) }
+creturnInverse := int32(0)
+ if returnInverse { creturnInverse = int32(1) }
+creturnCounts := int32(0)
+ if returnCounts { creturnCounts = int32(1) }
+ lib.AtgUniqueDim(ctensorPtr0, ts.ctensor, dim, csorted, creturnInverse, creturnCounts)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
+func(ts *Tensor) UniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+ ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
+
+ creturnInverse := int32(0)
+ if returnInverse { creturnInverse = int32(1) }
+creturnCounts := int32(0)
+ if returnCounts { creturnCounts = int32(1) }
+ lib.AtgUniqueDimConsecutive(ctensorPtr0, ts.ctensor, dim, creturnInverse, creturnCounts)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, retVal2, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal2 = &Tensor{ctensor: *ctensorPtr2}
+
+ return retVal0, retVal1, retVal2, err
+}
+
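Optional scalar parameters from the Declarations yaml (dim in UniqueConsecutive above, correction in VarCorrection below) are surfaced as slices: nil or an empty slice means "not set" (the *Null flag stays 1 on the C side), and element 0 carries the value otherwise. A sketch, same assumptions as the earlier examples; the ignored inverse tensor is left undropped for brevity:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch/ts"
    )

    func main() {
        xs, err := ts.OfSlice([]int64{1, 1, 2, 2, 3})
        if err != nil {
            panic(err)
        }
        // dim = nil -> operate on the flattened tensor; []int64{0} would pin dim 0.
        uniq, _, counts, err := xs.UniqueConsecutive(false, true, nil, false)
        if err != nil {
            panic(err)
        }
        fmt.Println(uniq.Int64Values(), counts.Int64Values()) // [1 2 3] [2 2 1]
        uniq.MustDrop()
        counts.MustDrop()
        xs.MustDrop()
    }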
+func(ts *Tensor) Unsqueeze(dim int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgUnsqueeze(ptr, ts.ctensor, dim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Unsqueeze_(dim int64)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgUnsqueeze_(ptr, ts.ctensor, dim)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBicubic2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBilinear2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleLinear1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
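The upsample family repeats the same optional pattern for its float scale factors: pass nil to let libtorch derive the scale from outputSize, or a one-element slice to pin it. A sketch with UpsampleLinear1d on an (N, C, W) input, under the same import assumptions plus the MustView helper from the must-wrapper file; shapes and values are illustrative:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch/ts"
    )

    func main() {
        xs, err := ts.OfSlice([]float64{0, 1, 2, 3})
        if err != nil {
            panic(err)
        }
        in := xs.MustView([]int64{1, 1, 4}, true) // (N=1, C=1, W=4); del=true drops xs
        // outputSize=[8], alignCorners=false, scales=nil (derived as 8/4 = 2).
        out, err := in.UpsampleLinear1d([]int64{8}, false, nil, true)
        if err != nil {
            panic(err)
        }
        fmt.Println(out.Float64Values())
        out.MustDrop()
    }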
+func(ts *Tensor) UpsampleNearest1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleNearest1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesVal float64 = 0.0
+ var cscalesNull int = 1
+ if len(scales) > 0 {
+ cscalesVal = scales[0]
+ cscalesNull = 0
+ }
+ lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func UpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleTrilinear3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ calignCorners := int32(0)
+ if alignCorners { calignCorners = int32(1) }
+var cscalesDVal float64 = 0.0
+ var cscalesDNull int = 1
+ if len(scalesD) > 0 {
+ cscalesDVal = scalesD[0]
+ cscalesDNull = 0
+ }
+var cscalesHVal float64 = 0.0
+ var cscalesHNull int = 1
+ if len(scalesH) > 0 {
+ cscalesHVal = scalesH[0]
+ cscalesHNull = 0
+ }
+var cscalesWVal float64 = 0.0
+ var cscalesWNull int = 1
+ if len(scalesW) > 0 {
+ cscalesWVal = scalesW[0]
+ cscalesWNull = 0
+ }
+ lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func ValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgValueSelectingReductionBackward(ptr, grad.ctensor, dim, indices.ctensor, sizes, len(sizes), ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Values(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgValues(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Vander(x *Tensor, n []int64, increasing bool)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var cnVal int64 = 0
+ var cnNull int = 1
+ if len(n) > 0 {
+ cnVal = n[0]
+ cnNull = 0
+ }
+cincreasing := int32(0)
+ if increasing { cincreasing = int32(1) }
+ lib.AtgVander(ptr, x.ctensor, cnVal, cnNull, cincreasing)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Var(unbiased bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cunbiased := int32(0)
+ if unbiased { cunbiased = int32(1) }
+ lib.AtgVar(ptr, ts.ctensor, cunbiased)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) VarCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var ccorrectionVal int64 = 0
+ var ccorrectionNull int = 1
+ if len(correction) > 0 {
+ ccorrectionVal = correction[0]
+ ccorrectionNull = 0
+ }
+ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgVarCorrection(ptr, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) VarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var ccorrectionVal int64 = 0
+ var ccorrectionNull int = 1
+ if len(correction) > 0 {
+ ccorrectionVal = correction[0]
+ ccorrectionNull = 0
+ }
+ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgVarCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) VarDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cunbiased := int32(0)
+ if unbiased { cunbiased = int32(1) }
+ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgVarDim(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) VarMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cunbiased := int32(0)
+ if unbiased { cunbiased = int32(1) }
+ lib.AtgVarMean(ctensorPtr0, ts.ctensor, cunbiased)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
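VarCorrection exposes the newer variance API in which correction generalizes the older unbiased flag: []int64{0} gives the population variance, []int64{1} the Bessel-corrected sample variance, and nil falls back to libtorch's default. A sketch, same assumptions as the earlier examples:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch/ts"
    )

    func main() {
        xs, err := ts.OfSlice([]float64{1, 2, 3, 4})
        if err != nil {
            panic(err)
        }
        // Population variance (correction = 0) over dim 0, keepdim = false.
        v, err := xs.VarCorrection([]int64{0}, []int64{0}, false, false)
        if err != nil {
            panic(err)
        }
        fmt.Println(v.Float64Values()) // [1.25]
        v.MustDrop()
        xs.MustDrop()
    }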
+func(ts *Tensor) VarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ var ccorrectionVal int64 = 0
+ var ccorrectionNull int = 1
+ if len(correction) > 0 {
+ ccorrectionVal = correction[0]
+ ccorrectionNull = 0
+ }
+ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgVarMeanCorrection(ctensorPtr0, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) VarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+ ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
+
+ cunbiased := int32(0)
+ if unbiased { cunbiased = int32(1) }
+ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgVarMeanDim(ctensorPtr0, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal0, retVal1, err
+ }
+ retVal0 = &Tensor{ctensor: *ctensorPtr0}
+ retVal1 = &Tensor{ctensor: *ctensorPtr1}
+
+ return retVal0, retVal1, err
+}
+
+func(ts *Tensor) VarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ cunbiased := int32(0)
+ if unbiased { cunbiased = int32(1) }
+ckeepdim := int32(0)
+ if keepdim { ckeepdim = int32(1) }
+ lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Vdot(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgVdot(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) VdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgVdotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) View(size []int64, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgView(ptr, ts.ctensor, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ViewAs(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgViewAs(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ViewAsComplex(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgViewAsComplex(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ViewAsReal(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgViewAsReal(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ViewDtype(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgViewDtype(ptr, ts.ctensor, dtype.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func Vstack(tensors []Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var ctensors []lib.Ctensor
+ for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
+ lib.AtgVstack(ptr, ctensors, len(ctensors))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func VstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ var ctensors []lib.Ctensor
+ for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
+ lib.AtgVstackOut(ptr, out.ctensor, ctensors, len(ctensors))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func WhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgWhereScalar(ptr, condition.ctensor, selfScalar.cscalar, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) WhereScalarother(condition *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgWhereScalarother(ptr, condition.ctensor, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func WhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgWhereScalarself(ptr, condition.ctensor, selfScalar.cscalar, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) WhereSelf(condition *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgWhereSelf(ptr, condition.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
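The four Where* variants encode in their suffix which of self/other is a Scalar; WhereSelf is the all-tensor form of torch.where(condition, self, other). A sketch, same assumptions, and additionally assuming OfSlice accepts a []bool (mapping it to the Bool dtype):

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch/ts"
    )

    func main() {
        cond, err := ts.OfSlice([]bool{true, false, true})
        if err != nil {
            panic(err)
        }
        a, _ := ts.OfSlice([]float64{1, 2, 3})
        b, _ := ts.OfSlice([]float64{10, 20, 30})
        // Picks from a where cond is true, else from b.
        out, err := a.WhereSelf(cond, b, false)
        if err != nil {
            panic(err)
        }
        fmt.Println(out.Float64Values()) // [1 20 3]
        out.MustDrop()
        cond.MustDrop()
        a.MustDrop()
        b.MustDrop()
    }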
+func(ts *Tensor) Xlogy(other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogy(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Xlogy_(other *Tensor)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogy_(ptr, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func(ts *Tensor) XlogyOutscalarOther(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogyOutscalarOther(ptr, out.ctensor, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func XlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogyOutscalarSelf(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) XlogyOuttensor(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogyOuttensor(ptr, out.ctensor, ts.ctensor, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) XlogyScalarOther(other *Scalar, del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogyScalarOther(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) XlogyScalarOther_(other *Scalar)(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogyScalarOther_(ptr, ts.ctensor, other.cscalar)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func XlogyScalarSelf(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgXlogyScalarSelf(ptr, selfScalar.cscalar, other.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) Zero_()(err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgZero_(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return err
+ }
+ ts.ctensor = *ptr
+
+ return err
+}
+
+func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgZeros(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func(ts *Tensor) ZerosLike(del bool)(retVal *Tensor, err error) {
+ if del { defer ts.MustDrop() }
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgZerosLike(ptr, ts.ctensor)
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+
+func ZerosOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
+
+ lib.AtgZerosOut(ptr, out.ctensor, size, len(size))
+ if err = TorchErr(); err != nil {
+ return retVal, err
+ }
+ retVal = &Tensor{ctensor: *ptr}
+
+ return retVal, err
+}
+// End of implementing Tensor =================================
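Factory functions like Zeros take an explicit (kind, device) pair instead of a receiver, and list-of-tensor arguments like Vstack's are lowered to a []lib.Ctensor before the C call. A closing sketch tying both together, same assumptions as the earlier examples:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        "github.com/sugarme/gotch/ts"
    )

    func main() {
        a, err := ts.Zeros([]int64{2}, gotch.Double, gotch.CPU)
        if err != nil {
            panic(err)
        }
        b, _ := ts.OfSlice([]float64{1, 2})
        // Vstack takes tensor values, not pointers; result has shape (2, 2).
        stacked, err := ts.Vstack([]ts.Tensor{*a, *b})
        if err != nil {
            panic(err)
        }
        fmt.Println(stacked.Float64Values()) // [0 0 1 2]
        stacked.MustDrop()
        a.MustDrop()
        b.MustDrop()
    }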