package libtch
|
|
|
|
// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
|
|
|
|
//#include "stdbool.h"
|
|
//#include "torch_api.h"
|
|
import "C"
|
|
|
|
import "unsafe"
|
|
|
|
// Bitwise / shift operator bindings.
//
// Each wrapper forwards its arguments unchanged to the matching libtorch C
// entry point and writes the resulting tensor handle(s) through ptr. By the
// generator's naming convention, a trailing "_" in the Go name pairs the
// tensor with a Cscalar right-hand side (C symbol atg___op__), while a
// trailing "1" pairs it with a Ctensor right-hand side (C symbol
// atg___op__1). The operator semantics are those of the corresponding torch
// __op__ / __iop__ methods — presumably the "i"-prefixed symbols are the
// in-place variants, but that is implemented on the C side and cannot be
// confirmed from this file.
//
// NOTE: this file is auto-generated; report or fix issues in the generator,
// never by hand-editing these functions.

// Atg__And_ calls C.atg___and__ with a scalar operand.
func Atg__And_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___and__(ptr, self, other )
}

// Atg__And1 calls C.atg___and__1 with a tensor operand.
func Atg__And1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___and__1(ptr, self, other)
}

// Atg__Iand_ calls C.atg___iand__ with a scalar operand.
func Atg__Iand_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___iand__(ptr, self, other )
}

// Atg__Iand1 calls C.atg___iand__1 with a tensor operand.
func Atg__Iand1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___iand__1(ptr, self, other)
}

// Atg__Ilshift_ calls C.atg___ilshift__ with a scalar operand.
func Atg__Ilshift_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___ilshift__(ptr, self, other )
}

// Atg__Ilshift1 calls C.atg___ilshift__1 with a tensor operand.
func Atg__Ilshift1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___ilshift__1(ptr, self, other)
}

// Atg__Ior_ calls C.atg___ior__ with a scalar operand.
func Atg__Ior_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___ior__(ptr, self, other )
}

// Atg__Ior1 calls C.atg___ior__1 with a tensor operand.
func Atg__Ior1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___ior__1(ptr, self, other)
}

// Atg__Irshift_ calls C.atg___irshift__ with a scalar operand.
func Atg__Irshift_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___irshift__(ptr, self, other )
}

// Atg__Irshift1 calls C.atg___irshift__1 with a tensor operand.
func Atg__Irshift1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___irshift__1(ptr, self, other)
}

// Atg__Ixor_ calls C.atg___ixor__ with a scalar operand.
func Atg__Ixor_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___ixor__(ptr, self, other )
}

// Atg__Ixor1 calls C.atg___ixor__1 with a tensor operand.
func Atg__Ixor1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___ixor__1(ptr, self, other)
}

// Atg__Lshift_ calls C.atg___lshift__ with a scalar operand.
func Atg__Lshift_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___lshift__(ptr, self, other )
}

// Atg__Lshift1 calls C.atg___lshift__1 with a tensor operand.
func Atg__Lshift1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___lshift__1(ptr, self, other)
}

// Atg__Or_ calls C.atg___or__ with a scalar operand.
func Atg__Or_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___or__(ptr, self, other )
}

// Atg__Or1 calls C.atg___or__1 with a tensor operand.
func Atg__Or1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___or__1(ptr, self, other)
}

// Atg__Rshift_ calls C.atg___rshift__ with a scalar operand.
func Atg__Rshift_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___rshift__(ptr, self, other )
}

// Atg__Rshift1 calls C.atg___rshift__1 with a tensor operand.
func Atg__Rshift1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___rshift__1(ptr, self, other)
}

// Atg__Xor_ calls C.atg___xor__ with a scalar operand.
func Atg__Xor_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg___xor__(ptr, self, other )
}

// Atg__Xor1 calls C.atg___xor__1 with a tensor operand.
func Atg__Xor1(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg___xor__1(ptr, self, other)
}
|
|
// Atg_AdaptiveAvgPool2d calls C.atg__adaptive_avg_pool2d. outputSizeData /
// outputSizeLen describe an int64 array passed to C as (pointer, length).
// NOTE(review): &outputSizeData[0] panics on an empty slice, and the
// unsafe.Pointer reinterpretation of the Go int length as C.int only reads
// the low 4 bytes on little-endian hosts — C.int(outputSizeLen) would be the
// portable spelling. Same pattern applies to every Data/Len pair in this
// file; the fix belongs in the generator.
func Atg_AdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
C.atg__adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
}

// Atg_AdaptiveAvgPool2dBackward calls C.atg__adaptive_avg_pool2d_backward.
func Atg_AdaptiveAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){
C.atg__adaptive_avg_pool2d_backward(ptr, gradOutput, self)
}

// Atg_Addr calls C.atg__addr (vector outer-product accumulate; semantics on
// the C side).
func Atg_Addr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){
C.atg__addr(ptr, self, vec1, vec2)
}

// Atg_Addr_ calls C.atg__addr_ (trailing underscore marks the in-place C
// symbol by libtorch convention).
func Atg_Addr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){
C.atg__addr_(ptr, self, vec1, vec2)
}

// Atg_AddrOut calls C.atg__addr_out, writing into the provided out tensor.
func Atg_AddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){
C.atg__addr_out(ptr, out, self, vec1, vec2)
}

// Atg_AmpUpdateScale calls C.atg__amp_update_scale (automatic mixed
// precision loss-scale update). The float64 and int64 arguments are
// bit-reinterpreted to their same-width C counterparts.
func Atg_AmpUpdateScale(ptr *Ctensor, growthTracker Ctensor, currentScale Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64){
cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor))
cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor))
cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval))
C.atg__amp_update_scale(ptr, growthTracker, currentScale, foundInf, cscaleGrowthFactor, cscaleBackoffFactor, cgrowthInterval)
}

// Atg_BaddbmmMkl_ calls C.atg__baddbmm_mkl_ (MKL batched matmul-accumulate).
func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
C.atg__baddbmm_mkl_(ptr, self, batch1, batch2)
}
|
|
// _cast_* bindings: each converts self to the dtype named by the C symbol.
// nonBlocking is a C-style bool flag (int32), reinterpreted to C.int — the
// widths match, so this bit-cast is safe.

// Atg_CastByte calls C.atg__cast_byte.
func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_byte(ptr, self, cnonBlocking)
}

// Atg_CastChar calls C.atg__cast_char.
func Atg_CastChar(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_char(ptr, self, cnonBlocking)
}

// Atg_CastDouble calls C.atg__cast_double.
func Atg_CastDouble(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_double(ptr, self, cnonBlocking)
}

// Atg_CastFloat calls C.atg__cast_float.
func Atg_CastFloat(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_float(ptr, self, cnonBlocking)
}

// Atg_CastHalf calls C.atg__cast_half.
func Atg_CastHalf(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_half(ptr, self, cnonBlocking)
}

// Atg_CastInt calls C.atg__cast_int.
func Atg_CastInt(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_int(ptr, self, cnonBlocking)
}

// Atg_CastLong calls C.atg__cast_long.
func Atg_CastLong(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_long(ptr, self, cnonBlocking)
}

// Atg_CastShort calls C.atg__cast_short.
func Atg_CastShort(ptr *Ctensor, self Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__cast_short(ptr, self, cnonBlocking)
}
|
|
// Atg_Cat calls C.atg__cat, concatenating the given tensors along dim.
// tensorsData/tensorsLen is passed to C as a (Ctensor*, int) array.
// NOTE(review): &tensorsData[0] panics on an empty slice; the Go-int-to-C.int
// pointer reinterpretation assumes little-endian (generator-wide pattern).
func Atg_Cat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cat(ptr, ctensorsDataPtr, ctensorsLen, cdim)
}

// Atg_CatOut calls C.atg__cat_out, writing into the provided out tensor.
func Atg_CatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim)
}

// Atg_CdistBackward calls C.atg__cdist_backward (gradient of the pairwise
// distance op); p is the distance norm exponent, bit-cast to C.double.
func Atg_CdistBackward(ptr *Ctensor, grad Ctensor, x1 Ctensor, x2 Ctensor, p float64, cdist Ctensor){
cp := *(*C.double)(unsafe.Pointer(&p))
C.atg__cdist_backward(ptr, grad, x1, x2, cp, cdist)
}

// Atg_CholeskyHelper calls C.atg__cholesky_helper; upper is a bool flag.
func Atg_CholeskyHelper(ptr *Ctensor, self Ctensor, upper int32){
cupper := *(*C.int)(unsafe.Pointer(&upper))
C.atg__cholesky_helper(ptr, self, cupper)
}

// Atg_CholeskySolveHelper calls C.atg__cholesky_solve_helper.
func Atg_CholeskySolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32){
cupper := *(*C.int)(unsafe.Pointer(&upper))
C.atg__cholesky_solve_helper(ptr, self, a, cupper)
}

// Atg_Coalesced_ calls C.atg__coalesced_ (sets the sparse-tensor coalesced
// flag; semantics on the C side).
func Atg_Coalesced_(ptr *Ctensor, self Ctensor, coalesced int32){
ccoalesced := *(*C.int)(unsafe.Pointer(&coalesced))
C.atg__coalesced_(ptr, self, ccoalesced)
}
|
|
// Atg_Convolution calls C.atg__convolution, the general convolution entry
// point. Each xData/xLen pair (stride, padding, dilation, outputPadding)
// becomes a C (int64_t*, int) array; transposed/benchmark/deterministic/
// cudnnEnabled are bool flags passed as C.int.
// NOTE(review): &xData[0] panics on empty slices; Go-int length fields are
// pointer-reinterpreted to C.int (little-endian assumption, generator-wide).
func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32){
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))
C.atg__convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled)
}

// Atg_ConvolutionNogroup calls C.atg__convolution_nogroup — same argument
// marshalling as Atg_Convolution minus groups/backend flags.
func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int){
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen)
}

// Atg_CopyFrom calls C.atg__copy_from; nonBlocking is a bool flag.
func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
C.atg__copy_from(ptr, self, dst, cnonBlocking)
}
|
|
// CTC-loss bindings. inputLengths/targetLengths arrive as int64 Data/Len
// pairs and are forwarded to C as (int64_t*, int) arrays; blank is the blank
// label index; zeroInfinity (and deterministic, where present) are bool
// flags passed as C.int. NOTE(review): &xData[0] panics on empty slices;
// Go-int lengths are pointer-reinterpreted to C.int (generator-wide pattern).

// Atg_CtcLoss calls C.atg__ctc_loss.
func Atg_CtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, zeroInfinity int32){
cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
C.atg__ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, czeroInfinity)
}

// Atg_CtcLossBackward calls C.atg__ctc_loss_backward.
func Atg_CtcLossBackward(ptr *Ctensor, grad Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, negLogLikelihood Ctensor, logAlpha Ctensor, blank int64, zeroInfinity int32){
cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
C.atg__ctc_loss_backward(ptr, grad, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, negLogLikelihood, logAlpha, cblank, czeroInfinity)
}

// Atg_CudnnCtcLoss calls C.atg__cudnn_ctc_loss.
func Atg_CudnnCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, deterministic int32, zeroInfinity int32){
cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
C.atg__cudnn_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, cdeterministic, czeroInfinity)
}
|
|
// Atg_CudnnInitDropoutState calls C.atg__cudnn_init_dropout_state.
// optionsKind/optionsDevice are the generated encoding of tensor options
// (dtype kind and device index) used throughout this file.
func Atg_CudnnInitDropoutState(ptr *Ctensor, dropout float64, train int32, dropoutSeed int64, optionsKind int32, optionsDevice int32){
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
ctrain := *(*C.int)(unsafe.Pointer(&train))
cdropoutSeed := *(*C.int64_t)(unsafe.Pointer(&dropoutSeed))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg__cudnn_init_dropout_state(ptr, cdropout, ctrain, cdropoutSeed, coptionsKind, coptionsDevice)
}

// Atg_CudnnRnn calls C.atg__cudnn_rnn. weightData/weightLen and
// batchSizesData/batchSizesLen become C (pointer, length) arrays.
// NOTE(review): &xData[0] panics on empty slices; Go-int lengths are
// pointer-reinterpreted to C.int (little-endian assumption, generator-wide).
func Atg_CudnnRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, weightBuf Ctensor, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){
cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0]))
cweightLen := *(*C.int)(unsafe.Pointer(&weightLen))
cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0))
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize))
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
ctrain := *(*C.int)(unsafe.Pointer(&train))
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0]))
cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen))
C.atg__cudnn_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, weightBuf, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState)
}

// Atg_CudnnRnnFlattenWeight calls C.atg__cudnn_rnn_flatten_weight.
func Atg_CudnnRnnFlattenWeight(ptr *Ctensor, weightArrData []Ctensor, weightArrLen int, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, bidirectional int32){
cweightArrDataPtr := (*Ctensor)(unsafe.Pointer(&weightArrData[0]))
cweightArrLen := *(*C.int)(unsafe.Pointer(&weightArrLen))
cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0))
cinputSize := *(*C.int64_t)(unsafe.Pointer(&inputSize))
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize))
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, cinputSize, cmode, chiddenSize, cnumLayers, cbatchFirst, cbidirectional)
}
|
|
// Atg_Cumprod calls C.atg__cumprod along dim.
func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cumprod(ptr, self, cdim)
}

// Atg_CumprodOut calls C.atg__cumprod_out, writing into out.
func Atg_CumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cumprod_out(ptr, out, self, cdim)
}

// Atg_Cumsum calls C.atg__cumsum along dim.
func Atg_Cumsum(ptr *Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cumsum(ptr, self, cdim)
}

// Atg_CumsumOut calls C.atg__cumsum_out, writing into out.
func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__cumsum_out(ptr, out, self, cdim)
}

// Atg_DimArange calls C.atg__dim_arange.
func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__dim_arange(ptr, like, cdim)
}

// Atg_DirichletGrad calls C.atg__dirichlet_grad.
func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor){
C.atg__dirichlet_grad(ptr, x, alpha, total)
}

// Embedding-bag bindings: mode selects the reduction (encoded as int64 on
// the C side); scaleGradByFreq/sparse/includeLastOffset are bool flags.

// Atg_EmbeddingBag calls C.atg__embedding_bag.
func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){
cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
csparse := *(*C.int)(unsafe.Pointer(&sparse))
cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset))
C.atg__embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset)
}

// Atg_EmbeddingBagBackward calls C.atg__embedding_bag_backward.
func Atg_EmbeddingBagBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor){
cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))
cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
csparse := *(*C.int)(unsafe.Pointer(&sparse))
C.atg__embedding_bag_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, csparse, perSampleWeights)
}

// Atg_EmbeddingBagDenseBackward calls C.atg__embedding_bag_dense_backward.
func Atg_EmbeddingBagDenseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){
cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))
cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
C.atg__embedding_bag_dense_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights)
}

// Atg_EmbeddingBagPerSampleWeightsBackward calls
// C.atg__embedding_bag_per_sample_weights_backward.
func Atg_EmbeddingBagPerSampleWeightsBackward(ptr *Ctensor, grad Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, mode int64){
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
C.atg__embedding_bag_per_sample_weights_backward(ptr, grad, weight, indices, offsets, offset2bag, cmode)
}

// Atg_EmbeddingBagSparseBackward calls C.atg__embedding_bag_sparse_backward.
func Atg_EmbeddingBagSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){
cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))
cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
C.atg__embedding_bag_sparse_backward(ptr, grad, indices, offsets, offset2bag, bagSize, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights)
}
|
|
// Atg_EmptyAffineQuantized calls C.atg__empty_affine_quantized.
// sizeData/sizeLen is the shape as a C (int64_t*, int) array; optionsKind /
// optionsDevice carry the encoded dtype/device; scale and zeroPoint are the
// quantization parameters. NOTE(review): &sizeData[0] panics on an empty
// slice; Go-int lengths are pointer-reinterpreted to C.int (generator-wide
// little-endian assumption).
func Atg_EmptyAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32, scale float64, zeroPoint int64){
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
cscale := *(*C.double)(unsafe.Pointer(&scale))
czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint))
C.atg__empty_affine_quantized(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice, cscale, czeroPoint)
}

// Atg_EmptyPerChannelAffineQuantized calls
// C.atg__empty_per_channel_affine_quantized; scales/zeroPoints are tensors
// and axis selects the quantization dimension.
func Atg_EmptyPerChannelAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, scales Ctensor, zeroPoints Ctensor, axis int64, optionsKind int32, optionsDevice int32){
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
caxis := *(*C.int64_t)(unsafe.Pointer(&axis))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg__empty_per_channel_affine_quantized(ptr, csizeDataPtr, csizeLen, scales, zeroPoints, caxis, coptionsKind, coptionsDevice)
}

// Atg_FftWithSize calls C.atg__fft_with_size; the int32 parameters are bool
// flags, the two Data/Len pairs are int64 arrays.
func Atg_FftWithSize(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalized int32, onesided int32, outputSizesData []int64, outputSizesLen int){
csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim))
ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput))
ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput))
cinverse := *(*C.int)(unsafe.Pointer(&inverse))
ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0]))
ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen))
cnormalized := *(*C.int)(unsafe.Pointer(&normalized))
conesided := *(*C.int)(unsafe.Pointer(&onesided))
coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0]))
coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen))
C.atg__fft_with_size(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalized, conesided, coutputSizesDataPtr, coutputSizesLen)
}

// Atg_FusedDropout calls C.atg__fused_dropout with keep-probability p.
func Atg_FusedDropout(ptr *Ctensor, self Ctensor, p float64){
cp := *(*C.double)(unsafe.Pointer(&p))
C.atg__fused_dropout(ptr, self, cp)
}
|
|
// Atg_GatherSparseBackward calls C.atg__gather_sparse_backward.
func Atg_GatherSparseBackward(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, grad Ctensor){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__gather_sparse_backward(ptr, self, cdim, index, grad)
}

// Atg_IndexCopy_ calls C.atg__index_copy_.
func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__index_copy_(ptr, self, cdim, index, source)
}

// Atg_IndexPutImpl_ calls C.atg__index_put_impl_. indicesData/indicesLen is
// a C (Ctensor*, int) array; accumulate and unsafety are bool flags.
// NOTE(review): &indicesData[0] panics on an empty slice; the Go-int length
// is pointer-reinterpreted to C.int (generator-wide little-endian pattern).
func Atg_IndexPutImpl_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32, unsafety int32){
cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0]))
cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen))
caccumulate := *(*C.int)(unsafe.Pointer(&accumulate))
cunsafety := *(*C.int)(unsafe.Pointer(&unsafety))
C.atg__index_put_impl_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate, cunsafety)
}

// Atg_Indices calls C.atg__indices.
func Atg_Indices(ptr *Ctensor, self Ctensor){
C.atg__indices(ptr, self)
}

// Atg_InverseHelper calls C.atg__inverse_helper.
func Atg_InverseHelper(ptr *Ctensor, self Ctensor){
C.atg__inverse_helper(ptr, self)
}

// Atg_LogSoftmax calls C.atg__log_softmax; halfToFloat is a bool flag.
func Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat))
C.atg__log_softmax(ptr, self, cdim, chalfToFloat)
}

// Atg_LogSoftmaxBackwardData calls C.atg__log_softmax_backward_data.
func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, self)
}

// Atg_LuSolveHelper calls C.atg__lu_solve_helper.
func Atg_LuSolveHelper(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){
C.atg__lu_solve_helper(ptr, self, lUData, lUPivots)
}

// Atg_LuWithInfo calls C.atg__lu_with_info; pivot and checkErrors are bool
// flags.
func Atg_LuWithInfo(ptr *Ctensor, self Ctensor, pivot int32, checkErrors int32){
cpivot := *(*C.int)(unsafe.Pointer(&pivot))
ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors))
C.atg__lu_with_info(ptr, self, cpivot, ccheckErrors)
}
|
|
// Atg_MakePerChannelQuantizedTensor calls
// C.atg__make_per_channel_quantized_tensor; axis selects the quantization
// dimension.
func Atg_MakePerChannelQuantizedTensor(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64){
caxis := *(*C.int64_t)(unsafe.Pointer(&axis))
C.atg__make_per_channel_quantized_tensor(ptr, self, scale, zeroPoint, caxis)
}

// Atg_MakePerTensorQuantizedTensor calls
// C.atg__make_per_tensor_quantized_tensor with scalar quantization params.
func Atg_MakePerTensorQuantizedTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64){
cscale := *(*C.double)(unsafe.Pointer(&scale))
czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint))
C.atg__make_per_tensor_quantized_tensor(ptr, self, cscale, czeroPoint)
}

// Atg_MaskedScale calls C.atg__masked_scale.
func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64){
cscale := *(*C.double)(unsafe.Pointer(&scale))
C.atg__masked_scale(ptr, self, mask, cscale)
}

// Dimension-reduction min/max bindings; keepdim is a bool flag and the
// "_out" variants write into caller-supplied result tensors.

// Atg_Max calls C.atg__max.
func Atg_Max(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__max(ptr, self, cdim, ckeepdim)
}

// Atg_MaxOut calls C.atg__max_out.
func Atg_MaxOut(ptr *Ctensor, max Ctensor, maxIndices Ctensor, self Ctensor, dim int64, keepdim int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__max_out(ptr, max, maxIndices, self, cdim, ckeepdim)
}

// Atg_Min calls C.atg__min.
func Atg_Min(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__min(ptr, self, cdim, ckeepdim)
}

// Atg_MinOut calls C.atg__min_out.
func Atg_MinOut(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__min_out(ptr, min, minIndices, self, cdim, ckeepdim)
}
|
|
// Atg_MkldnnReshape calls C.atg__mkldnn_reshape; shapeData/shapeLen is the
// target shape as a C (int64_t*, int) array. NOTE(review): &shapeData[0]
// panics on an empty slice; the Go-int length is pointer-reinterpreted to
// C.int (generator-wide little-endian pattern).
func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){
cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0]))
cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen))
C.atg__mkldnn_reshape(ptr, self, cshapeDataPtr, cshapeLen)
}

// Atg_MkldnnTranspose calls C.atg__mkldnn_transpose, swapping dim0 and dim1.
func Atg_MkldnnTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
C.atg__mkldnn_transpose(ptr, self, cdim0, cdim1)
}

// Atg_MkldnnTranspose_ calls C.atg__mkldnn_transpose_ (the in-place C
// symbol, by libtorch convention).
func Atg_MkldnnTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1)
}

// Atg_Mode calls C.atg__mode; keepdim is a bool flag.
func Atg_Mode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__mode(ptr, self, cdim, ckeepdim)
}

// Atg_ModeOut calls C.atg__mode_out, writing into values/indices.
func Atg_ModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__mode_out(ptr, values, indices, self, cdim, ckeepdim)
}

// Atg_MultinomialAliasDraw calls C.atg__multinomial_alias_draw.
func Atg_MultinomialAliasDraw(ptr *Ctensor, j Ctensor, q Ctensor, numSamples int64){
cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples))
C.atg__multinomial_alias_draw(ptr, j, q, cnumSamples)
}

// Atg_MultinomialAliasSetup calls C.atg__multinomial_alias_setup.
func Atg_MultinomialAliasSetup(ptr *Ctensor, probs Ctensor){
C.atg__multinomial_alias_setup(ptr, probs)
}

// Atg_NnpackSpatialConvolution calls C.atg__nnpack_spatial_convolution;
// padding and stride are int64 Data/Len array pairs.
func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int){
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)
}
|
|
// Atg_NnpackSpatialConvolutionBackwardInput calls
// C.atg__nnpack_spatial_convolution_backward_input. NOTE(review):
// &paddingData[0] panics on an empty slice; Go-int lengths are
// pointer-reinterpreted to C.int (generator-wide little-endian pattern).
func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int){
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
C.atg__nnpack_spatial_convolution_backward_input(ptr, input, gradOutput, weight, cpaddingDataPtr, cpaddingLen)
}

// Atg_NnpackSpatialConvolutionBackwardWeight calls
// C.atg__nnpack_spatial_convolution_backward_weight; weightsizeData/Len is
// the weight shape as an int64 array.
func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int){
cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0]))
cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen))
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen)
}

// Atg_PackPaddedSequence calls C.atg__pack_padded_sequence; batchFirst is a
// bool flag.
func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32){
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst)
}

// Atg_PackPaddedSequenceBackward calls C.atg__pack_padded_sequence_backward.
func Atg_PackPaddedSequenceBackward(ptr *Ctensor, grad Ctensor, inputSizeData []int64, inputSizeLen int, batchSizes Ctensor, batchFirst int32){
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
C.atg__pack_padded_sequence_backward(ptr, grad, cinputSizeDataPtr, cinputSizeLen, batchSizes, cbatchFirst)
}

// Atg_PadPackedSequence calls C.atg__pad_packed_sequence; paddingValue is a
// Cscalar fill value, totalLength a target length.
func Atg_PadPackedSequence(ptr *Ctensor, data Ctensor, batchSizes Ctensor, batchFirst int32, paddingValue Cscalar, totalLength int64){
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
ctotalLength := *(*C.int64_t)(unsafe.Pointer(&totalLength))
C.atg__pad_packed_sequence(ptr, data, batchSizes, cbatchFirst, paddingValue , ctotalLength)
}

// Atg_PdistBackward calls C.atg__pdist_backward; p is the norm exponent.
func Atg_PdistBackward(ptr *Ctensor, grad Ctensor, self Ctensor, p float64, pdist Ctensor){
cp := *(*C.double)(unsafe.Pointer(&p))
C.atg__pdist_backward(ptr, grad, self, cp, pdist)
}

// Atg_QrHelper calls C.atg__qr_helper; some is a bool flag.
func Atg_QrHelper(ptr *Ctensor, self Ctensor, some int32){
csome := *(*C.int)(unsafe.Pointer(&some))
C.atg__qr_helper(ptr, self, csome)
}
|
|
// Atg_ReshapeFromTensor calls C.atg__reshape_from_tensor.
func Atg_ReshapeFromTensor(ptr *Ctensor, self Ctensor, shape Ctensor){
C.atg__reshape_from_tensor(ptr, self, shape)
}

// Atg_SWhere calls C.atg__s_where (elementwise select by condition;
// semantics on the C side).
func Atg_SWhere(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){
C.atg__s_where(ptr, condition, self, other)
}

// Atg_SampleDirichlet calls C.atg__sample_dirichlet.
func Atg_SampleDirichlet(ptr *Ctensor, self Ctensor){
C.atg__sample_dirichlet(ptr, self)
}

// Atg_ShapeAsTensor calls C.atg__shape_as_tensor.
func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor){
C.atg__shape_as_tensor(ptr, self)
}

// Sobol quasi-random engine bindings; int64 state parameters are bit-cast
// to C.int64_t (same width, safe).

// Atg_SobolEngineDraw calls C.atg__sobol_engine_draw; dtype is an encoded
// scalar-type id.
func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32){
cn := *(*C.int64_t)(unsafe.Pointer(&n))
cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension))
cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated))
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
C.atg__sobol_engine_draw(ptr, quasi, cn, sobolstate, cdimension, cnumGenerated, cdtype)
}

// Atg_SobolEngineFf_ calls C.atg__sobol_engine_ff_.
func Atg_SobolEngineFf_(ptr *Ctensor, self Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64){
cn := *(*C.int64_t)(unsafe.Pointer(&n))
cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension))
cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated))
C.atg__sobol_engine_ff_(ptr, self, cn, sobolstate, cdimension, cnumGenerated)
}

// Atg_SobolEngineInitializeState_ calls C.atg__sobol_engine_initialize_state_.
func Atg_SobolEngineInitializeState_(ptr *Ctensor, self Ctensor, dimension int64){
cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension))
C.atg__sobol_engine_initialize_state_(ptr, self, cdimension)
}

// Atg_SobolEngineScramble_ calls C.atg__sobol_engine_scramble_.
func Atg_SobolEngineScramble_(ptr *Ctensor, self Ctensor, ltm Ctensor, dimension int64){
cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension))
C.atg__sobol_engine_scramble_(ptr, self, ltm, cdimension)
}
|
|
// Atg_Softmax calls C.atg__softmax; halfToFloat is a bool flag.
func Atg_Softmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat))
C.atg__softmax(ptr, self, cdim, chalfToFloat)
}

// Atg_SoftmaxBackwardData calls C.atg__softmax_backward_data.
func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self)
}

// Atg_SolveHelper calls C.atg__solve_helper.
func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor){
C.atg__solve_helper(ptr, self, a)
}

// Atg_SparseAddmm calls C.atg__sparse_addmm.
func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor){
C.atg__sparse_addmm(ptr, self, sparse, dense)
}

// Sparse COO tensor constructors. sizeData/sizeLen is the shape as a C
// (int64_t*, int) array; optionsKind/optionsDevice carry the encoded
// dtype/device. NOTE(review): &sizeData[0] panics on an empty slice (a
// 0-dim sparse tensor cannot be requested through these wrappers); Go-int
// lengths are pointer-reinterpreted to C.int (generator-wide pattern).

// Atg_SparseCooTensorUnsafe calls C.atg__sparse_coo_tensor_unsafe.
func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg__sparse_coo_tensor_unsafe(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
}

// Atg_SparseCooTensorWithDims calls C.atg__sparse_coo_tensor_with_dims.
func Atg_SparseCooTensorWithDims(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim))
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg__sparse_coo_tensor_with_dims(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
}

// Atg_SparseCooTensorWithDimsAndTensors calls
// C.atg__sparse_coo_tensor_with_dims_and_tensors.
func Atg_SparseCooTensorWithDimsAndTensors(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){
csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim))
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg__sparse_coo_tensor_with_dims_and_tensors(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, indices, values, coptionsKind, coptionsDevice)
}

// Atg_SparseMm calls C.atg__sparse_mm.
func Atg_SparseMm(ptr *Ctensor, sparse Ctensor, dense Ctensor){
C.atg__sparse_mm(ptr, sparse, dense)
}
|
|
func Atg_SparseSum(ptr *Ctensor, self Ctensor){
|
|
C.atg__sparse_sum(ptr, self)
|
|
}
|
|
func Atg_SparseSum1(ptr *Ctensor, self Ctensor, dtype int32){
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg__sparse_sum1(ptr, self, cdtype)
|
|
}
|
|
func Atg_SparseSum2(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
C.atg__sparse_sum2(ptr, self, cdimDataPtr, cdimLen)
|
|
}
|
|
func Atg_SparseSum3(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, dtype int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg__sparse_sum3(ptr, self, cdimDataPtr, cdimLen, cdtype)
|
|
}
|
|
func Atg_SparseSumBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dimData []int64, dimLen int){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
C.atg__sparse_sum_backward(ptr, grad, self, cdimDataPtr, cdimLen)
|
|
}
|
|
func Atg_StandardGamma(ptr *Ctensor, self Ctensor){
|
|
C.atg__standard_gamma(ptr, self)
|
|
}
|
|
func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor){
|
|
C.atg__standard_gamma_grad(ptr, self, output)
|
|
}
|
|
func Atg_Std(ptr *Ctensor, self Ctensor, unbiased int32){
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
C.atg__std(ptr, self, cunbiased)
|
|
}
|
|
func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32){
|
|
csome := *(*C.int)(unsafe.Pointer(&some))
|
|
ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv))
|
|
C.atg__svd_helper(ptr, self, csome, ccomputeUv)
|
|
}
|
|
func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){
|
|
ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors))
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg__symeig_helper(ptr, self, ceigenvectors, cupper)
|
|
}
|
|
func Atg_TriangularSolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
|
|
cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular))
|
|
C.atg__triangular_solve_helper(ptr, self, a, cupper, ctranspose, cunitriangular)
|
|
}
|
|
func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64){
|
|
cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0]))
|
|
cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len))
|
|
cexpand2DataPtr := (*C.int64_t)(unsafe.Pointer(&expand2Data[0]))
|
|
cexpand2Len := *(*C.int)(unsafe.Pointer(&expand2Len))
|
|
cexpand3DataPtr := (*C.int64_t)(unsafe.Pointer(&expand3Data[0]))
|
|
cexpand3Len := *(*C.int)(unsafe.Pointer(&expand3Len))
|
|
csumdimDataPtr := (*C.int64_t)(unsafe.Pointer(&sumdimData[0]))
|
|
csumdimLen := *(*C.int)(unsafe.Pointer(&sumdimLen))
|
|
cunrollDim := *(*C.int64_t)(unsafe.Pointer(&unrollDim))
|
|
C.atg__trilinear(ptr, i1, i2, i3, cexpand1DataPtr, cexpand1Len, cexpand2DataPtr, cexpand2Len, cexpand3DataPtr, cexpand3Len, csumdimDataPtr, csumdimLen, cunrollDim)
|
|
}
|
|
func Atg_Unique(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32){
|
|
csorted := *(*C.int)(unsafe.Pointer(&sorted))
|
|
creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
|
|
C.atg__unique(ptr, self, csorted, creturnInverse)
|
|
}
|
|
func Atg_Unique2(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32, returnCounts int32){
|
|
csorted := *(*C.int)(unsafe.Pointer(&sorted))
|
|
creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
|
|
creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts))
|
|
C.atg__unique2(ptr, self, csorted, creturnInverse, creturnCounts)
|
|
}
|
|
func Atg_UnsafeView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen)
|
|
}
|
|
func Atg_Values(ptr *Ctensor, self Ctensor){
|
|
C.atg__values(ptr, self)
|
|
}
|
|
func Atg_Var(ptr *Ctensor, self Ctensor, unbiased int32){
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
C.atg__var(ptr, self, cunbiased)
|
|
}
|
|
func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg__weight_norm(ptr, v, g, cdim)
|
|
}
|
|
func Atg_WeightNormCudaInterface(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg__weight_norm_cuda_interface(ptr, v, g, cdim)
|
|
}
|
|
func Atg_WeightNormCudaInterfaceBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg__weight_norm_cuda_interface_backward(ptr, gradW, savedV, savedG, savedNorms, cdim)
|
|
}
|
|
func Atg_WeightNormDifferentiableBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg__weight_norm_differentiable_backward(ptr, gradW, savedV, savedG, savedNorms, cdim)
|
|
}
|
|
func AtgAbs(ptr *Ctensor, self Ctensor){
|
|
C.atg_abs(ptr, self)
|
|
}
|
|
func AtgAbs_(ptr *Ctensor, self Ctensor){
|
|
C.atg_abs_(ptr, self)
|
|
}
|
|
func AtgAbsOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_abs_out(ptr, out, self)
|
|
}
|
|
func AtgAcos(ptr *Ctensor, self Ctensor){
|
|
C.atg_acos(ptr, self)
|
|
}
|
|
func AtgAcos_(ptr *Ctensor, self Ctensor){
|
|
C.atg_acos_(ptr, self)
|
|
}
|
|
func AtgAcosOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_acos_out(ptr, out, self)
|
|
}
|
|
func AtgAdaptiveAvgPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_avg_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_avg_pool2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveAvgPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_avg_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){
|
|
C.atg_adaptive_avg_pool3d_backward(ptr, gradOutput, self)
|
|
}
|
|
func AtgAdaptiveAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor){
|
|
C.atg_adaptive_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self)
|
|
}
|
|
func AtgAdaptiveAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_avg_pool3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveMaxPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_max_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveMaxPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_max_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){
|
|
C.atg_adaptive_max_pool2d_backward(ptr, gradOutput, self, indices)
|
|
}
|
|
func AtgAdaptiveMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){
|
|
C.atg_adaptive_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, indices)
|
|
}
|
|
func AtgAdaptiveMaxPool2dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_max_pool2d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveMaxPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_max_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdaptiveMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){
|
|
C.atg_adaptive_max_pool3d_backward(ptr, gradOutput, self, indices)
|
|
}
|
|
func AtgAdaptiveMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){
|
|
C.atg_adaptive_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, indices)
|
|
}
|
|
func AtgAdaptiveMaxPool3dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_adaptive_max_pool3d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_add(ptr, self, other)
|
|
}
|
|
func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_add1(ptr, self, other )
|
|
}
|
|
func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_add_(ptr, self, other)
|
|
}
|
|
func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_add_1(ptr, self, other )
|
|
}
|
|
func AtgAddOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_add_out(ptr, out, self, other)
|
|
}
|
|
func AtgAddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
|
|
C.atg_addbmm(ptr, self, batch1, batch2)
|
|
}
|
|
func AtgAddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
|
|
C.atg_addbmm_(ptr, self, batch1, batch2)
|
|
}
|
|
func AtgAddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
|
|
C.atg_addbmm_out(ptr, out, self, batch1, batch2)
|
|
}
|
|
func AtgAddcdiv(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){
|
|
C.atg_addcdiv(ptr, self, tensor1, tensor2)
|
|
}
|
|
func AtgAddcdiv_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){
|
|
C.atg_addcdiv_(ptr, self, tensor1, tensor2)
|
|
}
|
|
func AtgAddcdivOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){
|
|
C.atg_addcdiv_out(ptr, out, self, tensor1, tensor2)
|
|
}
|
|
func AtgAddcmul(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){
|
|
C.atg_addcmul(ptr, self, tensor1, tensor2)
|
|
}
|
|
func AtgAddcmul_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){
|
|
C.atg_addcmul_(ptr, self, tensor1, tensor2)
|
|
}
|
|
func AtgAddcmulOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){
|
|
C.atg_addcmul_out(ptr, out, self, tensor1, tensor2)
|
|
}
|
|
func AtgAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){
|
|
C.atg_addmm(ptr, self, mat1, mat2)
|
|
}
|
|
func AtgAddmm_(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){
|
|
C.atg_addmm_(ptr, self, mat1, mat2)
|
|
}
|
|
func AtgAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){
|
|
C.atg_addmm_out(ptr, out, self, mat1, mat2)
|
|
}
|
|
func AtgAddmv(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){
|
|
C.atg_addmv(ptr, self, mat, vec)
|
|
}
|
|
func AtgAddmv_(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){
|
|
C.atg_addmv_(ptr, self, mat, vec)
|
|
}
|
|
func AtgAddmvOut(ptr *Ctensor, out Ctensor, self Ctensor, mat Ctensor, vec Ctensor){
|
|
C.atg_addmv_out(ptr, out, self, mat, vec)
|
|
}
|
|
func AtgAddr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){
|
|
C.atg_addr(ptr, self, vec1, vec2)
|
|
}
|
|
func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){
|
|
C.atg_addr_(ptr, self, vec1, vec2)
|
|
}
|
|
func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){
|
|
C.atg_addr_out(ptr, out, self, vec1, vec2)
|
|
}
|
|
func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
|
|
C.atg_affine_grid_generator(ptr, theta, csizeDataPtr, csizeLen, calignCorners)
|
|
}
|
|
func AtgAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, sizeData []int64, sizeLen int, alignCorners int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
|
|
C.atg_affine_grid_generator_backward(ptr, grad, csizeDataPtr, csizeLen, calignCorners)
|
|
}
|
|
func AtgAlias(ptr *Ctensor, self Ctensor){
|
|
C.atg_alias(ptr, self)
|
|
}
|
|
func AtgAlignAs(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_align_as(ptr, self, other)
|
|
}
|
|
|
|
func AtgAll(ptr *Ctensor, self Ctensor){
|
|
C.atg_all(ptr, self)
|
|
}
|
|
func AtgAll1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_all1(ptr, self, cdim, ckeepdim)
|
|
}
|
|
func AtgAllOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_all_out(ptr, out, self, cdim, ckeepdim)
|
|
}
|
|
func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
C.atg_alpha_dropout(ptr, input, cp, ctrain)
|
|
}
|
|
func AtgAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
C.atg_alpha_dropout_(ptr, self, cp, ctrain)
|
|
}
|
|
func AtgAngle(ptr *Ctensor, self Ctensor){
|
|
C.atg_angle(ptr, self)
|
|
}
|
|
func AtgAngleOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_angle_out(ptr, out, self)
|
|
}
|
|
func AtgAny(ptr *Ctensor, self Ctensor){
|
|
C.atg_any(ptr, self)
|
|
}
|
|
func AtgAny1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_any1(ptr, self, cdim, ckeepdim)
|
|
}
|
|
func AtgAnyOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_any_out(ptr, out, self, cdim, ckeepdim)
|
|
}
|
|
func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_arange(ptr, end , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_arange1(ptr, start , end , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_arange2(ptr, start , end , step , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar){
|
|
C.atg_arange_out(ptr, out, end )
|
|
}
|
|
func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){
|
|
C.atg_arange_out1(ptr, out, start , end )
|
|
}
|
|
func AtgArgmax(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_argmax(ptr, self, cdim, ckeepdim)
|
|
}
|
|
func AtgArgmin(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_argmin(ptr, self, cdim, ckeepdim)
|
|
}
|
|
func AtgArgsort(ptr *Ctensor, self Ctensor, dim int64, descending int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cdescending := *(*C.int)(unsafe.Pointer(&descending))
|
|
C.atg_argsort(ptr, self, cdim, cdescending)
|
|
}
|
|
func AtgAsStrided(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset))
|
|
C.atg_as_strided(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset)
|
|
}
|
|
func AtgAsStrided_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset))
|
|
C.atg_as_strided_(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset)
|
|
}
|
|
func AtgAsin(ptr *Ctensor, self Ctensor){
|
|
C.atg_asin(ptr, self)
|
|
}
|
|
func AtgAsin_(ptr *Ctensor, self Ctensor){
|
|
C.atg_asin_(ptr, self)
|
|
}
|
|
func AtgAsinOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_asin_out(ptr, out, self)
|
|
}
|
|
func AtgAtan(ptr *Ctensor, self Ctensor){
|
|
C.atg_atan(ptr, self)
|
|
}
|
|
func AtgAtan2(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_atan2(ptr, self, other)
|
|
}
|
|
func AtgAtan2_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_atan2_(ptr, self, other)
|
|
}
|
|
func AtgAtan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_atan2_out(ptr, out, self, other)
|
|
}
|
|
func AtgAtan_(ptr *Ctensor, self Ctensor){
|
|
C.atg_atan_(ptr, self)
|
|
}
|
|
func AtgAtanOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_atan_out(ptr, out, self)
|
|
}
|
|
func AtgAvgPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
C.atg_avg_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad)
|
|
}
|
|
func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool2d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad))
|
|
cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride))
|
|
C.atg_avg_pool3d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride)
|
|
}
|
|
func AtgBaddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
|
|
C.atg_baddbmm(ptr, self, batch1, batch2)
|
|
}
|
|
func AtgBaddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
|
|
C.atg_baddbmm_(ptr, self, batch1, batch2)
|
|
}
|
|
func AtgBaddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
|
|
C.atg_baddbmm_out(ptr, out, self, batch1, batch2)
|
|
}
|
|
func AtgBartlettWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){
|
|
cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_bartlett_window(ptr, cwindowLength, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgBartlettWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){
|
|
cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
|
|
cperiodic := *(*C.int)(unsafe.Pointer(&periodic))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_bartlett_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64, cudnnEnabled int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
cmomentum := *(*C.double)(unsafe.Pointer(&momentum))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))
|
|
C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnabled)
|
|
}
|
|
func AtgBatchNormBackwardElemt(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, meanDy Ctensor, meanDyXmu Ctensor){
|
|
C.atg_batch_norm_backward_elemt(ptr, gradOut, input, mean, invstd, weight, meanDy, meanDyXmu)
|
|
}
|
|
func AtgBatchNormBackwardReduce(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, inputG int32, weightG int32, biasG int32){
|
|
cinputG := *(*C.int)(unsafe.Pointer(&inputG))
|
|
cweightG := *(*C.int)(unsafe.Pointer(&weightG))
|
|
cbiasG := *(*C.int)(unsafe.Pointer(&biasG))
|
|
C.atg_batch_norm_backward_reduce(ptr, gradOut, input, mean, invstd, weight, cinputG, cweightG, cbiasG)
|
|
}
|
|
func AtgBatchNormElemt(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_batch_norm_elemt(ptr, input, weight, bias, mean, invstd, ceps)
|
|
}
|
|
func AtgBatchNormElemtOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_batch_norm_elemt_out(ptr, out, input, weight, bias, mean, invstd, ceps)
|
|
}
|
|
func AtgBatchNormGatherStats(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, count int64){
|
|
cmomentum := *(*C.double)(unsafe.Pointer(&momentum))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
ccount := *(*C.int64_t)(unsafe.Pointer(&count))
|
|
C.atg_batch_norm_gather_stats(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccount)
|
|
}
|
|
func AtgBatchNormGatherStatsWithCounts(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, countsData []int64, countsLen int){
|
|
cmomentum := *(*C.double)(unsafe.Pointer(&momentum))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
ccountsDataPtr := (*C.int64_t)(unsafe.Pointer(&countsData[0]))
|
|
ccountsLen := *(*C.int)(unsafe.Pointer(&countsLen))
|
|
C.atg_batch_norm_gather_stats_with_counts(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccountsDataPtr, ccountsLen)
|
|
}
|
|
func AtgBatchNormStats(ptr *Ctensor, input Ctensor, eps float64){
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_batch_norm_stats(ptr, input, ceps)
|
|
}
|
|
func AtgBatchNormUpdateStats(ptr *Ctensor, input Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64){
|
|
cmomentum := *(*C.double)(unsafe.Pointer(&momentum))
|
|
C.atg_batch_norm_update_stats(ptr, input, runningMean, runningVar, cmomentum)
|
|
}
|
|
func AtgBernoulli(ptr *Ctensor, self Ctensor){
|
|
C.atg_bernoulli(ptr, self)
|
|
}
|
|
func AtgBernoulli1(ptr *Ctensor, self Ctensor, p float64){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
C.atg_bernoulli1(ptr, self, cp)
|
|
}
|
|
func AtgBernoulli_(ptr *Ctensor, self Ctensor, p Ctensor){
|
|
C.atg_bernoulli_(ptr, self, p)
|
|
}
|
|
func AtgBernoulli1_(ptr *Ctensor, self Ctensor, p float64){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
C.atg_bernoulli_1(ptr, self, cp)
|
|
}
|
|
func AtgBernoulliOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_bernoulli_out(ptr, out, self)
|
|
}
|
|
func AtgBilinear(ptr *Ctensor, input1 Ctensor, input2 Ctensor, weight Ctensor, bias Ctensor){
|
|
C.atg_bilinear(ptr, input1, input2, weight, bias)
|
|
}
|
|
func AtgBinaryCrossEntropy(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_binary_cross_entropy(ptr, self, target, weight, creduction)
|
|
}
|
|
func AtgBinaryCrossEntropyBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_binary_cross_entropy_backward(ptr, gradOutput, self, target, weight, creduction)
|
|
}
|
|
func AtgBinaryCrossEntropyBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_binary_cross_entropy_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction)
|
|
}
|
|
func AtgBinaryCrossEntropyOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_binary_cross_entropy_out(ptr, out, self, target, weight, creduction)
|
|
}
|
|
func AtgBinaryCrossEntropyWithLogits(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_binary_cross_entropy_with_logits(ptr, self, target, weight, posWeight, creduction)
|
|
}
|
|
func AtgBinaryCrossEntropyWithLogitsBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_binary_cross_entropy_with_logits_backward(ptr, gradOutput, self, target, weight, posWeight, creduction)
|
|
}
|
|
func AtgBincount(ptr *Ctensor, self Ctensor, weights Ctensor, minlength int64){
|
|
cminlength := *(*C.int64_t)(unsafe.Pointer(&minlength))
|
|
C.atg_bincount(ptr, self, weights, cminlength)
|
|
}
|
|
func AtgBitwiseAnd(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_and(ptr, self, other )
|
|
}
|
|
func AtgBitwiseAnd1(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_and1(ptr, self, other)
|
|
}
|
|
func AtgBitwiseAnd_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_and_(ptr, self, other )
|
|
}
|
|
func AtgBitwiseAnd1_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_and_1(ptr, self, other)
|
|
}
|
|
func AtgBitwiseAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_and_out(ptr, out, self, other)
|
|
}
|
|
func AtgBitwiseAndOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_and_out1(ptr, out, self, other )
|
|
}
|
|
func AtgBitwiseNot(ptr *Ctensor, self Ctensor){
|
|
C.atg_bitwise_not(ptr, self)
|
|
}
|
|
func AtgBitwiseNot_(ptr *Ctensor, self Ctensor){
|
|
C.atg_bitwise_not_(ptr, self)
|
|
}
|
|
func AtgBitwiseNotOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_bitwise_not_out(ptr, out, self)
|
|
}
|
|
func AtgBitwiseOr(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_or(ptr, self, other )
|
|
}
|
|
func AtgBitwiseOr1(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_or1(ptr, self, other)
|
|
}
|
|
func AtgBitwiseOr_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_or_(ptr, self, other )
|
|
}
|
|
func AtgBitwiseOr1_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_or_1(ptr, self, other)
|
|
}
|
|
func AtgBitwiseOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_or_out(ptr, out, self, other)
|
|
}
|
|
func AtgBitwiseOrOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_or_out1(ptr, out, self, other )
|
|
}
|
|
func AtgBitwiseXor(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_xor(ptr, self, other )
|
|
}
|
|
func AtgBitwiseXor1(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_xor1(ptr, self, other)
|
|
}
|
|
func AtgBitwiseXor_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_xor_(ptr, self, other )
|
|
}
|
|
func AtgBitwiseXor1_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_xor_1(ptr, self, other)
|
|
}
|
|
func AtgBitwiseXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_bitwise_xor_out(ptr, out, self, other)
|
|
}
|
|
func AtgBitwiseXorOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_bitwise_xor_out1(ptr, out, self, other )
|
|
}
|
|
func AtgBlackmanWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){
|
|
cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_blackman_window(ptr, cwindowLength, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgBlackmanWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){
|
|
cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
|
|
cperiodic := *(*C.int)(unsafe.Pointer(&periodic))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_blackman_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgBmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){
|
|
C.atg_bmm(ptr, self, mat2)
|
|
}
|
|
func AtgBmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){
|
|
C.atg_bmm_out(ptr, out, self, mat2)
|
|
}
|
|
|
|
func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){
|
|
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
|
|
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
|
|
C.atg_cartesian_prod(ptr, ctensorsDataPtr, ctensorsLen)
|
|
}
|
|
func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
|
|
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
|
|
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_cat(ptr, ctensorsDataPtr, ctensorsLen, cdim)
|
|
}
|
|
func AtgCatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
|
|
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
|
|
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim)
|
|
}
|
|
func AtgCauchy_(ptr *Ctensor, self Ctensor, median float64, sigma float64){
|
|
cmedian := *(*C.double)(unsafe.Pointer(&median))
|
|
csigma := *(*C.double)(unsafe.Pointer(&sigma))
|
|
C.atg_cauchy_(ptr, self, cmedian, csigma)
|
|
}
|
|
func AtgCdist(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, computeMode int64){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
ccomputeMode := *(*C.int64_t)(unsafe.Pointer(&computeMode))
|
|
C.atg_cdist(ptr, x1, x2, cp, ccomputeMode)
|
|
}
|
|
func AtgCeil(ptr *Ctensor, self Ctensor){
|
|
C.atg_ceil(ptr, self)
|
|
}
|
|
func AtgCeil_(ptr *Ctensor, self Ctensor){
|
|
C.atg_ceil_(ptr, self)
|
|
}
|
|
func AtgCeilOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_ceil_out(ptr, out, self)
|
|
}
|
|
func AtgCelu(ptr *Ctensor, self Ctensor){
|
|
C.atg_celu(ptr, self)
|
|
}
|
|
func AtgCelu_(ptr *Ctensor, self Ctensor){
|
|
C.atg_celu_(ptr, self)
|
|
}
|
|
func AtgChainMatmul(ptr *Ctensor, matricesData []Ctensor, matricesLen int){
|
|
cmatricesDataPtr := (*Ctensor)(unsafe.Pointer(&matricesData[0]))
|
|
cmatricesLen := *(*C.int)(unsafe.Pointer(&matricesLen))
|
|
C.atg_chain_matmul(ptr, cmatricesDataPtr, cmatricesLen)
|
|
}
|
|
func AtgCholesky(ptr *Ctensor, self Ctensor, upper int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg_cholesky(ptr, self, cupper)
|
|
}
|
|
func AtgCholeskyInverse(ptr *Ctensor, self Ctensor, upper int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg_cholesky_inverse(ptr, self, cupper)
|
|
}
|
|
func AtgCholeskyInverseOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg_cholesky_inverse_out(ptr, out, self, cupper)
|
|
}
|
|
func AtgCholeskyOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg_cholesky_out(ptr, out, self, cupper)
|
|
}
|
|
func AtgCholeskySolve(ptr *Ctensor, self Ctensor, input2 Ctensor, upper int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg_cholesky_solve(ptr, self, input2, cupper)
|
|
}
|
|
func AtgCholeskySolveOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, upper int32){
|
|
cupper := *(*C.int)(unsafe.Pointer(&upper))
|
|
C.atg_cholesky_solve_out(ptr, out, self, input2, cupper)
|
|
}
|
|
|
|
func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){
|
|
C.atg_clamp(ptr, self, min , max )
|
|
}
|
|
func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){
|
|
C.atg_clamp_(ptr, self, min , max )
|
|
}
|
|
func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar){
|
|
C.atg_clamp_max(ptr, self, max )
|
|
}
|
|
func AtgClampMax_(ptr *Ctensor, self Ctensor, max Cscalar){
|
|
C.atg_clamp_max_(ptr, self, max )
|
|
}
|
|
func AtgClampMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, max Cscalar){
|
|
C.atg_clamp_max_out(ptr, out, self, max )
|
|
}
|
|
func AtgClampMin(ptr *Ctensor, self Ctensor, min Cscalar){
|
|
C.atg_clamp_min(ptr, self, min )
|
|
}
|
|
func AtgClampMin_(ptr *Ctensor, self Ctensor, min Cscalar){
|
|
C.atg_clamp_min_(ptr, self, min )
|
|
}
|
|
func AtgClampMinOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar){
|
|
C.atg_clamp_min_out(ptr, out, self, min )
|
|
}
|
|
func AtgClampOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar){
|
|
C.atg_clamp_out(ptr, out, self, min , max )
|
|
}
|
|
func AtgCoalesce(ptr *Ctensor, self Ctensor){
|
|
C.atg_coalesce(ptr, self)
|
|
}
|
|
func AtgCol2im(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
C.atg_col2im(ptr, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)
|
|
}
|
|
func AtgCol2imBackward(ptr *Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
C.atg_col2im_backward(ptr, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)
|
|
}
|
|
func AtgCol2imBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
C.atg_col2im_backward_out(ptr, gradInput, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)
|
|
}
|
|
func AtgCol2imOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
C.atg_col2im_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)
|
|
}
|
|
func AtgCombinations(ptr *Ctensor, self Ctensor, r int64, withReplacement int32){
|
|
cr := *(*C.int64_t)(unsafe.Pointer(&r))
|
|
cwithReplacement := *(*C.int)(unsafe.Pointer(&withReplacement))
|
|
C.atg_combinations(ptr, self, cr, cwithReplacement)
|
|
}
|
|
func AtgConj(ptr *Ctensor, self Ctensor){
|
|
C.atg_conj(ptr, self)
|
|
}
|
|
func AtgConjOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_conj_out(ptr, out, self)
|
|
}
|
|
func AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int){
|
|
cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0]))
|
|
cpadLen := *(*C.int)(unsafe.Pointer(&padLen))
|
|
C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen)
|
|
}
|
|
func AtgContiguous(ptr *Ctensor, self Ctensor){
|
|
C.atg_contiguous(ptr, self)
|
|
}
|
|
func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups)
|
|
}
|
|
func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups)
|
|
}
|
|
func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups)
|
|
}
|
|
func AtgConvTbc(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64){
|
|
cpad := *(*C.int64_t)(unsafe.Pointer(&pad))
|
|
C.atg_conv_tbc(ptr, self, weight, bias, cpad)
|
|
}
|
|
func AtgConvTbcBackward(ptr *Ctensor, self Ctensor, input Ctensor, weight Ctensor, bias Ctensor, pad int64){
|
|
cpad := *(*C.int64_t)(unsafe.Pointer(&pad))
|
|
C.atg_conv_tbc_backward(ptr, self, input, weight, bias, cpad)
|
|
}
|
|
func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups)
|
|
}
|
|
func AtgConvolutionOverrideable(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_convolution_overrideable(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups)
|
|
}
|
|
func AtgCopySparseToSparse_(ptr *Ctensor, self Ctensor, src Ctensor, nonBlocking int32){
|
|
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
|
|
C.atg_copy_sparse_to_sparse_(ptr, self, src, cnonBlocking)
|
|
}
|
|
func AtgCos(ptr *Ctensor, self Ctensor){
|
|
C.atg_cos(ptr, self)
|
|
}
|
|
func AtgCos_(ptr *Ctensor, self Ctensor){
|
|
C.atg_cos_(ptr, self)
|
|
}
|
|
func AtgCosOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_cos_out(ptr, out, self)
|
|
}
|
|
func AtgCosh(ptr *Ctensor, self Ctensor){
|
|
C.atg_cosh(ptr, self)
|
|
}
|
|
func AtgCosh_(ptr *Ctensor, self Ctensor){
|
|
C.atg_cosh_(ptr, self)
|
|
}
|
|
func AtgCoshOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_cosh_out(ptr, out, self)
|
|
}
|
|
func AtgCosineEmbeddingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){
|
|
cmargin := *(*C.double)(unsafe.Pointer(&margin))
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_cosine_embedding_loss(ptr, input1, input2, target, cmargin, creduction)
|
|
}
|
|
func AtgCosineSimilarity(ptr *Ctensor, x1 Ctensor, x2 Ctensor, dim int64, eps float64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_cosine_similarity(ptr, x1, x2, cdim, ceps)
|
|
}
|
|
func AtgCross(ptr *Ctensor, self Ctensor, other Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_cross(ptr, self, other, cdim)
|
|
}
|
|
func AtgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_cross_out(ptr, out, self, other, cdim)
|
|
}
|
|
func AtgCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, reduction int64, zeroInfinity int32){
|
|
cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
|
|
cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
|
|
ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
|
|
ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
|
|
cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
|
|
C.atg_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, creduction, czeroInfinity)
|
|
}
|
|
func AtgCtcLoss1(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengths Ctensor, targetLengths Ctensor, blank int64, reduction int64, zeroInfinity int32){
|
|
cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
|
|
C.atg_ctc_loss1(ptr, logProbs, targets, inputLengths, targetLengths, cblank, creduction, czeroInfinity)
|
|
}
|
|
func AtgCudnnAffineGridGenerator(ptr *Ctensor, theta Ctensor, n int64, c int64, h int64, w int64){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
cc := *(*C.int64_t)(unsafe.Pointer(&c))
|
|
ch := *(*C.int64_t)(unsafe.Pointer(&h))
|
|
cw := *(*C.int64_t)(unsafe.Pointer(&w))
|
|
C.atg_cudnn_affine_grid_generator(ptr, theta, cn, cc, ch, cw)
|
|
}
|
|
func AtgCudnnAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, n int64, c int64, h int64, w int64){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
cc := *(*C.int64_t)(unsafe.Pointer(&c))
|
|
ch := *(*C.int64_t)(unsafe.Pointer(&h))
|
|
cw := *(*C.int64_t)(unsafe.Pointer(&w))
|
|
C.atg_cudnn_affine_grid_generator_backward(ptr, grad, cn, cc, ch, cw)
|
|
}
|
|
func AtgCudnnBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor))
|
|
cepsilon := *(*C.double)(unsafe.Pointer(&epsilon))
|
|
C.atg_cudnn_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon)
|
|
}
|
|
func AtgCudnnBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64, reserveSpace Ctensor){
|
|
cepsilon := *(*C.double)(unsafe.Pointer(&epsilon))
|
|
C.atg_cudnn_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon, reserveSpace)
|
|
}
|
|
// AtgCudnnConvolution is the cgo shim for atg_cudnn_convolution.
// Each []int64 parameter is passed to C as a pointer to its first element
// plus an explicit length, matching the C API's (int64_t*, int) pairs.
// NOTE(review): &sliceData[0] panics on an empty slice — the generated
// callers presumably always pass non-empty padding/stride/dilation; confirm.
func AtgCudnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolution1 is the cgo shim for atg_cudnn_convolution1 — the
// variant of AtgCudnnConvolution that also takes an explicit bias tensor.
func AtgCudnnConvolution1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolutionBackwardInput is the cgo shim for
// atg_cudnn_convolution_backward_input; selfSizeData carries the input
// shape as an (int64_t*, int) pair, like the other list arguments.
func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0]))


cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolutionBackwardWeight is the cgo shim for
// atg_cudnn_convolution_backward_weight; weightSizeData carries the weight
// shape as an (int64_t*, int) pair.
func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0]))


cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolutionTranspose is the cgo shim for
// atg_cudnn_convolution_transpose; adds an outputPadding list over the
// plain convolution shim.
func AtgCudnnConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))


coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolutionTranspose1 is the cgo shim for
// atg_cudnn_convolution_transpose1 — the bias-taking variant of
// AtgCudnnConvolutionTranspose.
func AtgCudnnConvolutionTranspose1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))


coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution_transpose1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolutionTransposeBackwardInput is the cgo shim for
// atg_cudnn_convolution_transpose_backward_input.
func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnConvolutionTransposeBackwardWeight is the cgo shim for
// atg_cudnn_convolution_transpose_backward_weight.
func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){


cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0]))


cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))


cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))


cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))


C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)


}
|
|
// AtgCudnnGridSampler is the cgo shim for atg_cudnn_grid_sampler (no
// scalar conversions needed — all arguments are tensor handles).
func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor){


C.atg_cudnn_grid_sampler(ptr, self, grid)


}
|
|
// AtgCudnnGridSamplerBackward is the cgo shim for atg_cudnn_grid_sampler_backward.
func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor){


C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput)


}
|
|
// AtgCummax is the cgo shim for atg_cummax; dim is reinterpreted as C.int64_t.
func AtgCummax(ptr *Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_cummax(ptr, self, cdim)


}
|
|
// AtgCummaxOut is the cgo shim for atg_cummax_out (out-variant writing into
// the provided values/indices tensors).
func AtgCummaxOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_cummax_out(ptr, values, indices, self, cdim)


}
|
|
// AtgCummin is the cgo shim for atg_cummin.
func AtgCummin(ptr *Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_cummin(ptr, self, cdim)


}
|
|
// AtgCumminOut is the cgo shim for atg_cummin_out.
func AtgCumminOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_cummin_out(ptr, values, indices, self, cdim)


}
|
|
// AtgCumprod is the cgo shim for atg_cumprod; dtype is an int32 scalar-type
// code reinterpreted as C.int.
func AtgCumprod(ptr *Ctensor, self Ctensor, dim int64, dtype int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_cumprod(ptr, self, cdim, cdtype)


}
|
|
// AtgCumprodOut is the cgo shim for atg_cumprod_out.
func AtgCumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_cumprod_out(ptr, out, self, cdim, cdtype)


}
|
|
// AtgCumsum is the cgo shim for atg_cumsum.
func AtgCumsum(ptr *Ctensor, self Ctensor, dim int64, dtype int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_cumsum(ptr, self, cdim, cdtype)


}
|
|
// AtgCumsumOut is the cgo shim for atg_cumsum_out.
func AtgCumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_cumsum_out(ptr, out, self, cdim, cdtype)


}
|
|
// AtgData is the cgo shim for atg_data.
func AtgData(ptr *Ctensor, self Ctensor){


C.atg_data(ptr, self)


}
|
|
// AtgDequantize is the cgo shim for atg_dequantize.
func AtgDequantize(ptr *Ctensor, self Ctensor){


C.atg_dequantize(ptr, self)


}
|
|
// AtgDet is the cgo shim for atg_det.
func AtgDet(ptr *Ctensor, self Ctensor){


C.atg_det(ptr, self)


}
|
|
// AtgDetach is the cgo shim for atg_detach.
func AtgDetach(ptr *Ctensor, self Ctensor){


C.atg_detach(ptr, self)


}
|
|
// AtgDetach_ is the cgo shim for atg_detach_ (trailing underscore marks the
// in-place variant throughout this file).
func AtgDetach_(ptr *Ctensor, self Ctensor){


C.atg_detach_(ptr, self)


}
|
|
// AtgDiag is the cgo shim for atg_diag.
func AtgDiag(ptr *Ctensor, self Ctensor, diagonal int64){


cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))


C.atg_diag(ptr, self, cdiagonal)


}
|
|
// AtgDiagEmbed is the cgo shim for atg_diag_embed.
func AtgDiagEmbed(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){


coffset := *(*C.int64_t)(unsafe.Pointer(&offset))


cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))


cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2))


C.atg_diag_embed(ptr, self, coffset, cdim1, cdim2)


}
|
|
// AtgDiagOut is the cgo shim for atg_diag_out.
func AtgDiagOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){


cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))


C.atg_diag_out(ptr, out, self, cdiagonal)


}
|
|
// AtgDiagflat is the cgo shim for atg_diagflat.
func AtgDiagflat(ptr *Ctensor, self Ctensor, offset int64){


coffset := *(*C.int64_t)(unsafe.Pointer(&offset))


C.atg_diagflat(ptr, self, coffset)


}
|
|
// AtgDiagonal is the cgo shim for atg_diagonal.
func AtgDiagonal(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){


coffset := *(*C.int64_t)(unsafe.Pointer(&offset))


cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))


cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2))


C.atg_diagonal(ptr, self, coffset, cdim1, cdim2)


}
|
|
// AtgDigamma is the cgo shim for atg_digamma.
func AtgDigamma(ptr *Ctensor, self Ctensor){


C.atg_digamma(ptr, self)


}
|
|
// AtgDigamma_ is the cgo shim for atg_digamma_ (in-place).
func AtgDigamma_(ptr *Ctensor, self Ctensor){


C.atg_digamma_(ptr, self)


}
|
|
// AtgDigammaOut is the cgo shim for atg_digamma_out.
func AtgDigammaOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_digamma_out(ptr, out, self)


}
|
|
// AtgDist is the cgo shim for atg_dist.
func AtgDist(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_dist(ptr, self, other)


}
|
|
// AtgDiv is the cgo shim for atg_div (tensor / tensor).
func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_div(ptr, self, other)


}
|
|
// AtgDiv1 is the cgo shim for atg_div1 (tensor / scalar overload).
func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_div1(ptr, self, other )


}
|
|
// AtgDiv_ is the cgo shim for atg_div_ (in-place, tensor operand).
func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_div_(ptr, self, other)


}
|
|
// AtgDiv1_ is the cgo shim for atg_div_1 (in-place, scalar operand).
func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_div_1(ptr, self, other )


}
|
|
// AtgDivOut is the cgo shim for atg_div_out.
func AtgDivOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_div_out(ptr, out, self, other)


}
|
|
// AtgDot is the cgo shim for atg_dot.
func AtgDot(ptr *Ctensor, self Ctensor, tensor Ctensor){


C.atg_dot(ptr, self, tensor)


}
|
|
// AtgDotOut is the cgo shim for atg_dot_out.
func AtgDotOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor Ctensor){


C.atg_dot_out(ptr, out, self, tensor)


}
|
|
// AtgDropout is the cgo shim for atg_dropout; p becomes C.double and the
// train flag becomes C.int.
func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int32){


cp := *(*C.double)(unsafe.Pointer(&p))


ctrain := *(*C.int)(unsafe.Pointer(&train))


C.atg_dropout(ptr, input, cp, ctrain)


}
|
|
// AtgDropout_ is the cgo shim for atg_dropout_ (in-place).
func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){


cp := *(*C.double)(unsafe.Pointer(&p))


ctrain := *(*C.int)(unsafe.Pointer(&train))


C.atg_dropout_(ptr, self, cp, ctrain)


}
|
|
// AtgEig is the cgo shim for atg_eig; eigenvectors is a bool-as-int flag.
func AtgEig(ptr *Ctensor, self Ctensor, eigenvectors int32){


ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors))


C.atg_eig(ptr, self, ceigenvectors)


}
|
|
// AtgEigOut is the cgo shim for atg_eig_out.
func AtgEigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32){


ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors))


C.atg_eig_out(ptr, e, v, self, ceigenvectors)


}
|
|
// AtgEinsum is the cgo shim for atg_einsum. The equation string is copied
// to C memory with C.CString and passed with its byte length; the tensor
// list is passed as pointer-to-first-element plus length.
// NOTE(review): cequation is malloc'd by C.CString and never freed here —
// this leaks unless atg_einsum takes ownership of the buffer; confirm
// against torch_api.h and add C.free(unsafe.Pointer(cequation)) if not.
func AtgEinsum(ptr *Ctensor, equation string, tensorsData []Ctensor, tensorsLen int){


cequation := C.CString(equation)


equationLen := len(equation)


cequationLen := *(*C.int)(unsafe.Pointer(&equationLen))


ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))


ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))


C.atg_einsum(ptr, cequation, cequationLen, ctensorsDataPtr, ctensorsLen)


}
|
|
// AtgElu is the cgo shim for atg_elu.
func AtgElu(ptr *Ctensor, self Ctensor){


C.atg_elu(ptr, self)


}
|
|
// AtgElu_ is the cgo shim for atg_elu_ (in-place).
func AtgElu_(ptr *Ctensor, self Ctensor){


C.atg_elu_(ptr, self)


}
|
|
// AtgEluBackward is the cgo shim for atg_elu_backward; the Cscalar args are
// passed through unchanged (the generator leaves its trailing space here).
func AtgEluBackward(ptr *Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor){


C.atg_elu_backward(ptr, gradOutput, alpha , scale , inputScale , output)


}
|
|
// AtgEluBackwardOut is the cgo shim for atg_elu_backward_out.
func AtgEluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor){


C.atg_elu_backward_out(ptr, gradInput, gradOutput, alpha , scale , inputScale , output)


}
|
|
// AtgEluOut is the cgo shim for atg_elu_out.
func AtgEluOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_elu_out(ptr, out, self)


}
|
|
// AtgEmbedding is the cgo shim for atg_embedding.
func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int32, sparse int32){


cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx))


cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))


csparse := *(*C.int)(unsafe.Pointer(&sparse))


C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse)


}
|
|
// AtgEmbeddingBackward is the cgo shim for atg_embedding_backward.
func AtgEmbeddingBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32, sparse int32){


cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))


cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx))


cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))


csparse := *(*C.int)(unsafe.Pointer(&sparse))


C.atg_embedding_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq, csparse)


}
|
|
// AtgEmbeddingBag is the cgo shim for atg_embedding_bag; mode is an int64
// code, the int32 args are bool-as-int flags.
func AtgEmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){


cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))


cmode := *(*C.int64_t)(unsafe.Pointer(&mode))


csparse := *(*C.int)(unsafe.Pointer(&sparse))


cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset))


C.atg_embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset)


}
|
|
// AtgEmbeddingDenseBackward is the cgo shim for atg_embedding_dense_backward.
func AtgEmbeddingDenseBackward(ptr *Ctensor, gradOutput Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){


cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))


cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx))


cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))


C.atg_embedding_dense_backward(ptr, gradOutput, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq)


}
|
|
// AtgEmbeddingRenorm_ is the cgo shim for atg_embedding_renorm_ (in-place).
func AtgEmbeddingRenorm_(ptr *Ctensor, self Ctensor, indices Ctensor, maxNorm float64, normType float64){


cmaxNorm := *(*C.double)(unsafe.Pointer(&maxNorm))


cnormType := *(*C.double)(unsafe.Pointer(&normType))


C.atg_embedding_renorm_(ptr, self, indices, cmaxNorm, cnormType)


}
|
|
// AtgEmbeddingSparseBackward is the cgo shim for atg_embedding_sparse_backward.
func AtgEmbeddingSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){


cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))


cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx))


cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))


C.atg_embedding_sparse_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq)


}
|
|
// AtgEmpty is the cgo shim for atg_empty; optionsKind/optionsDevice are the
// int32 dtype/device codes used throughout this file, passed as C.int.
func AtgEmpty(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){


csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))


csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_empty(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)


}
|
|
// AtgEmptyLike is the cgo shim for atg_empty_like.
func AtgEmptyLike(ptr *Ctensor, self Ctensor){


C.atg_empty_like(ptr, self)


}
|
|
// AtgEmptyOut is the cgo shim for atg_empty_out.
func AtgEmptyOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){


csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))


csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))


C.atg_empty_out(ptr, out, csizeDataPtr, csizeLen)


}
|
|
// AtgEmptyStrided is the cgo shim for atg_empty_strided.
func AtgEmptyStrided(ptr *Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, optionsKind int32, optionsDevice int32){


csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))


csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_empty_strided(ptr, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, coptionsKind, coptionsDevice)


}
|
|
// AtgEq is the cgo shim for atg_eq (scalar comparand).
func AtgEq(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_eq(ptr, self, other )


}
|
|
// AtgEq1 is the cgo shim for atg_eq1 (tensor comparand).
func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_eq1(ptr, self, other)


}
|
|
// AtgEq_ is the cgo shim for atg_eq_ (in-place, scalar comparand).
func AtgEq_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_eq_(ptr, self, other )


}
|
|
// AtgEq1_ is the cgo shim for atg_eq_1 (in-place, tensor comparand).
func AtgEq1_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_eq_1(ptr, self, other)


}
|
|
// AtgEqOut is the cgo shim for atg_eq_out (scalar comparand).
func AtgEqOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){


C.atg_eq_out(ptr, out, self, other )


}
|
|
// AtgEqOut1 is the cgo shim for atg_eq_out1 (tensor comparand).
func AtgEqOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_eq_out1(ptr, out, self, other)


}
|
|
// AtgErf is the cgo shim for atg_erf.
func AtgErf(ptr *Ctensor, self Ctensor){


C.atg_erf(ptr, self)


}
|
|
// AtgErf_ is the cgo shim for atg_erf_ (in-place).
func AtgErf_(ptr *Ctensor, self Ctensor){


C.atg_erf_(ptr, self)


}
|
|
// AtgErfOut is the cgo shim for atg_erf_out.
func AtgErfOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_erf_out(ptr, out, self)


}
|
|
// AtgErfc is the cgo shim for atg_erfc.
func AtgErfc(ptr *Ctensor, self Ctensor){


C.atg_erfc(ptr, self)


}
|
|
// AtgErfc_ is the cgo shim for atg_erfc_ (in-place).
func AtgErfc_(ptr *Ctensor, self Ctensor){


C.atg_erfc_(ptr, self)


}
|
|
// AtgErfcOut is the cgo shim for atg_erfc_out.
func AtgErfcOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_erfc_out(ptr, out, self)


}
|
|
// AtgErfinv is the cgo shim for atg_erfinv.
func AtgErfinv(ptr *Ctensor, self Ctensor){


C.atg_erfinv(ptr, self)


}
|
|
// AtgErfinv_ is the cgo shim for atg_erfinv_ (in-place).
func AtgErfinv_(ptr *Ctensor, self Ctensor){


C.atg_erfinv_(ptr, self)


}
|
|
// AtgErfinvOut is the cgo shim for atg_erfinv_out.
func AtgErfinvOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_erfinv_out(ptr, out, self)


}
|
|
// AtgExp is the cgo shim for atg_exp.
func AtgExp(ptr *Ctensor, self Ctensor){


C.atg_exp(ptr, self)


}
|
|
// AtgExp_ is the cgo shim for atg_exp_ (in-place).
func AtgExp_(ptr *Ctensor, self Ctensor){


C.atg_exp_(ptr, self)


}
|
|
// AtgExpOut is the cgo shim for atg_exp_out.
func AtgExpOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_exp_out(ptr, out, self)


}
|
|
// AtgExpand is the cgo shim for atg_expand; size is an (int64_t*, int)
// pair, implicit a bool-as-int flag.
func AtgExpand(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, implicit int32){


csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))


csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))


cimplicit := *(*C.int)(unsafe.Pointer(&implicit))


C.atg_expand(ptr, self, csizeDataPtr, csizeLen, cimplicit)


}
|
|
// AtgExpandAs is the cgo shim for atg_expand_as.
func AtgExpandAs(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_expand_as(ptr, self, other)


}
|
|
// AtgExpm1 is the cgo shim for atg_expm1.
func AtgExpm1(ptr *Ctensor, self Ctensor){


C.atg_expm1(ptr, self)


}
|
|
// AtgExpm1_ is the cgo shim for atg_expm1_ (in-place).
func AtgExpm1_(ptr *Ctensor, self Ctensor){


C.atg_expm1_(ptr, self)


}
|
|
// AtgExpm1Out is the cgo shim for atg_expm1_out.
func AtgExpm1Out(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_expm1_out(ptr, out, self)


}
|
|
// AtgExponential_ is the cgo shim for atg_exponential_ (in-place).
func AtgExponential_(ptr *Ctensor, self Ctensor, lambd float64){


clambd := *(*C.double)(unsafe.Pointer(&lambd))


C.atg_exponential_(ptr, self, clambd)


}
|
|
// AtgEye is the cgo shim for atg_eye (square, n x n).
func AtgEye(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){


cn := *(*C.int64_t)(unsafe.Pointer(&n))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_eye(ptr, cn, coptionsKind, coptionsDevice)


}
|
|
// AtgEye1 is the cgo shim for atg_eye1 (rectangular, n x m).
func AtgEye1(ptr *Ctensor, n int64, m int64, optionsKind int32, optionsDevice int32){


cn := *(*C.int64_t)(unsafe.Pointer(&n))


cm := *(*C.int64_t)(unsafe.Pointer(&m))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_eye1(ptr, cn, cm, coptionsKind, coptionsDevice)


}
|
|
// AtgEyeOut is the cgo shim for atg_eye_out.
func AtgEyeOut(ptr *Ctensor, out Ctensor, n int64){


cn := *(*C.int64_t)(unsafe.Pointer(&n))


C.atg_eye_out(ptr, out, cn)


}
|
|
// AtgEyeOut1 is the cgo shim for atg_eye_out1 (rectangular out-variant).
func AtgEyeOut1(ptr *Ctensor, out Ctensor, n int64, m int64){


cn := *(*C.int64_t)(unsafe.Pointer(&n))


cm := *(*C.int64_t)(unsafe.Pointer(&m))


C.atg_eye_out1(ptr, out, cn, cm)


}
|
|
// AtgFakeQuantizePerChannelAffine is the cgo shim for
// atg_fake_quantize_per_channel_affine.
func AtgFakeQuantizePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){


caxis := *(*C.int64_t)(unsafe.Pointer(&axis))


cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin))


cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax))


C.atg_fake_quantize_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax)


}
|
|
// AtgFakeQuantizePerChannelAffineBackward is the cgo shim for
// atg_fake_quantize_per_channel_affine_backward.
func AtgFakeQuantizePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){


caxis := *(*C.int64_t)(unsafe.Pointer(&axis))


cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin))


cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax))


C.atg_fake_quantize_per_channel_affine_backward(ptr, grad, self, scale, zeroPoint, caxis, cquantMin, cquantMax)


}
|
|
// AtgFakeQuantizePerTensorAffine is the cgo shim for
// atg_fake_quantize_per_tensor_affine (scale/zeroPoint are plain scalars
// here, unlike the per-channel variant).
func AtgFakeQuantizePerTensorAffine(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){


cscale := *(*C.double)(unsafe.Pointer(&scale))


czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint))


cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin))


cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax))


C.atg_fake_quantize_per_tensor_affine(ptr, self, cscale, czeroPoint, cquantMin, cquantMax)


}
|
|
// AtgFakeQuantizePerTensorAffineBackward is the cgo shim for
// atg_fake_quantize_per_tensor_affine_backward.
func AtgFakeQuantizePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){


cscale := *(*C.double)(unsafe.Pointer(&scale))


czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint))


cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin))


cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax))


C.atg_fake_quantize_per_tensor_affine_backward(ptr, grad, self, cscale, czeroPoint, cquantMin, cquantMax)


}
|
|
// AtgFbgemmLinearFp16Weight is the cgo shim for atg_fbgemm_linear_fp16_weight.
func AtgFbgemmLinearFp16Weight(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){


C.atg_fbgemm_linear_fp16_weight(ptr, input, packedWeight, bias)


}
|
|
// AtgFbgemmLinearFp16WeightFp32Activation is the cgo shim for
// atg_fbgemm_linear_fp16_weight_fp32_activation.
func AtgFbgemmLinearFp16WeightFp32Activation(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){


C.atg_fbgemm_linear_fp16_weight_fp32_activation(ptr, input, packedWeight, bias)


}
|
|
// AtgFbgemmLinearInt8Weight is the cgo shim for atg_fbgemm_linear_int8_weight.
func AtgFbgemmLinearInt8Weight(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){


C.atg_fbgemm_linear_int8_weight(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias)


}
|
|
// AtgFbgemmLinearInt8WeightFp32Activation is the cgo shim for
// atg_fbgemm_linear_int8_weight_fp32_activation.
func AtgFbgemmLinearInt8WeightFp32Activation(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){


C.atg_fbgemm_linear_int8_weight_fp32_activation(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias)


}
|
|
// AtgFbgemmPackGemmMatrixFp16 is the cgo shim for atg_fbgemm_pack_gemm_matrix_fp16.
func AtgFbgemmPackGemmMatrixFp16(ptr *Ctensor, input Ctensor){


C.atg_fbgemm_pack_gemm_matrix_fp16(ptr, input)


}
|
|
// AtgFbgemmPackQuantizedMatrix is the cgo shim for atg_fbgemm_pack_quantized_matrix.
func AtgFbgemmPackQuantizedMatrix(ptr *Ctensor, input Ctensor){


C.atg_fbgemm_pack_quantized_matrix(ptr, input)


}
|
|
// AtgFbgemmPackQuantizedMatrix1 is the cgo shim for
// atg_fbgemm_pack_quantized_matrix1 (explicit k x n dimensions).
func AtgFbgemmPackQuantizedMatrix1(ptr *Ctensor, input Ctensor, k int64, n int64){


ck := *(*C.int64_t)(unsafe.Pointer(&k))


cn := *(*C.int64_t)(unsafe.Pointer(&n))


C.atg_fbgemm_pack_quantized_matrix1(ptr, input, ck, cn)


}
|
|
// AtgFeatureAlphaDropout is the cgo shim for atg_feature_alpha_dropout.
func AtgFeatureAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){


cp := *(*C.double)(unsafe.Pointer(&p))


ctrain := *(*C.int)(unsafe.Pointer(&train))


C.atg_feature_alpha_dropout(ptr, input, cp, ctrain)


}
|
|
// AtgFeatureAlphaDropout_ is the cgo shim for atg_feature_alpha_dropout_ (in-place).
func AtgFeatureAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){


cp := *(*C.double)(unsafe.Pointer(&p))


ctrain := *(*C.int)(unsafe.Pointer(&train))


C.atg_feature_alpha_dropout_(ptr, self, cp, ctrain)


}
|
|
// AtgFeatureDropout is the cgo shim for atg_feature_dropout.
func AtgFeatureDropout(ptr *Ctensor, input Ctensor, p float64, train int32){


cp := *(*C.double)(unsafe.Pointer(&p))


ctrain := *(*C.int)(unsafe.Pointer(&train))


C.atg_feature_dropout(ptr, input, cp, ctrain)


}
|
|
// AtgFeatureDropout_ is the cgo shim for atg_feature_dropout_ (in-place).
func AtgFeatureDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){


cp := *(*C.double)(unsafe.Pointer(&p))


ctrain := *(*C.int)(unsafe.Pointer(&train))


C.atg_feature_dropout_(ptr, self, cp, ctrain)


}
|
|
// AtgFft is the cgo shim for atg_fft.
func AtgFft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){


csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim))


cnormalized := *(*C.int)(unsafe.Pointer(&normalized))


C.atg_fft(ptr, self, csignalNdim, cnormalized)


}
|
|
// AtgFill_ is the cgo shim for atg_fill_ (in-place, scalar value).
func AtgFill_(ptr *Ctensor, self Ctensor, value Cscalar){


C.atg_fill_(ptr, self, value )


}
|
|
// AtgFill1_ is the cgo shim for atg_fill_1 (in-place, tensor value).
func AtgFill1_(ptr *Ctensor, self Ctensor, value Ctensor){


C.atg_fill_1(ptr, self, value)


}
|
|
// AtgFillDiagonal_ is the cgo shim for atg_fill_diagonal_ (in-place).
func AtgFillDiagonal_(ptr *Ctensor, self Ctensor, fillValue Cscalar, wrap int32){


cwrap := *(*C.int)(unsafe.Pointer(&wrap))


C.atg_fill_diagonal_(ptr, self, fillValue , cwrap)


}
|
|
// AtgFlatten is the cgo shim for atg_flatten.
func AtgFlatten(ptr *Ctensor, self Ctensor, startDim int64, endDim int64){


cstartDim := *(*C.int64_t)(unsafe.Pointer(&startDim))


cendDim := *(*C.int64_t)(unsafe.Pointer(&endDim))


C.atg_flatten(ptr, self, cstartDim, cendDim)


}
|
|
// AtgFlip is the cgo shim for atg_flip; dims is an (int64_t*, int) pair.
func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){


cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))


cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))


C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen)


}
|
|
// AtgFloor is the cgo shim for atg_floor.
func AtgFloor(ptr *Ctensor, self Ctensor){


C.atg_floor(ptr, self)


}
|
|
// AtgFloor_ is the cgo shim for atg_floor_ (in-place).
func AtgFloor_(ptr *Ctensor, self Ctensor){


C.atg_floor_(ptr, self)


}
|
|
// AtgFloorDivide is the cgo shim for atg_floor_divide (tensor divisor).
func AtgFloorDivide(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_floor_divide(ptr, self, other)


}
|
|
// AtgFloorDivide1 is the cgo shim for atg_floor_divide1 (scalar divisor).
func AtgFloorDivide1(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_floor_divide1(ptr, self, other )


}
|
|
// AtgFloorDivide_ is the cgo shim for atg_floor_divide_ (in-place, tensor divisor).
func AtgFloorDivide_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_floor_divide_(ptr, self, other)


}
|
|
// AtgFloorDivide1_ is the cgo shim for atg_floor_divide_1 (in-place, scalar divisor).
func AtgFloorDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_floor_divide_1(ptr, self, other )


}
|
|
// AtgFloorDivideOut is the cgo shim for atg_floor_divide_out.
func AtgFloorDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_floor_divide_out(ptr, out, self, other)


}
|
|
// AtgFloorOut is the cgo shim for atg_floor_out.
func AtgFloorOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_floor_out(ptr, out, self)


}
|
|
// AtgFmod is the cgo shim for atg_fmod (scalar divisor).
func AtgFmod(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_fmod(ptr, self, other )


}
|
|
// AtgFmod1 is the cgo shim for atg_fmod1 (tensor divisor).
func AtgFmod1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_fmod1(ptr, self, other)


}
|
|
// AtgFmod_ is the cgo shim for atg_fmod_ (in-place, scalar divisor).
func AtgFmod_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_fmod_(ptr, self, other )


}
|
|
// AtgFmod1_ is the cgo shim for atg_fmod_1 (in-place, tensor divisor).
func AtgFmod1_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_fmod_1(ptr, self, other)


}
|
|
// AtgFmodOut is the cgo shim for atg_fmod_out (scalar divisor).
func AtgFmodOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){


C.atg_fmod_out(ptr, out, self, other )


}
|
|
// AtgFmodOut1 is the cgo shim for atg_fmod_out1 (tensor divisor).
func AtgFmodOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_fmod_out1(ptr, out, self, other)


}
|
|
// AtgFrac is the cgo shim for atg_frac.
func AtgFrac(ptr *Ctensor, self Ctensor){


C.atg_frac(ptr, self)


}
|
|
// AtgFrac_ is the cgo shim for atg_frac_ (in-place).
func AtgFrac_(ptr *Ctensor, self Ctensor){


C.atg_frac_(ptr, self)


}
|
|
// AtgFracOut is the cgo shim for atg_frac_out.
func AtgFracOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_frac_out(ptr, out, self)


}
|
|
// AtgFractionalMaxPool2d is the cgo shim for atg_fractional_max_pool2d;
// kernelSize and outputSize are (int64_t*, int) pairs.
func AtgFractionalMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


C.atg_fractional_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples)


}
|
|
// AtgFractionalMaxPool2dBackward is the cgo shim for
// atg_fractional_max_pool2d_backward.
func AtgFractionalMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


C.atg_fractional_max_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices)


}
|
|
func AtgFractionalMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_fractional_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices)
|
|
}
|
|
func AtgFractionalMaxPool2dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_fractional_max_pool2d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples)
|
|
}
|
|
func AtgFractionalMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_fractional_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples)
|
|
}
|
|
func AtgFractionalMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_fractional_max_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices)
|
|
}
|
|
func AtgFractionalMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_fractional_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices)
|
|
}
|
|
func AtgFractionalMaxPool3dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_fractional_max_pool3d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples)
|
|
}
|
|
// frobenius_norm bindings. dimData/dimLen are marshalled as a raw pointer plus
// length; keepdim (int32) is reinterpreted as a C.int boolean flag.
func AtgFrobeniusNorm(ptr *Ctensor, self Ctensor){


C.atg_frobenius_norm(ptr, self)


}


func AtgFrobeniusNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_frobenius_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim)


}


func AtgFrobeniusNormOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_frobenius_norm_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim)


}
|
|
// AtgFromFile binds atg_from_file. The Go string is copied to a C string and
// its byte length is passed separately.
// NOTE(review): cfilename is allocated by C.CString and never freed here —
// this leaks one allocation per call unless the C side takes ownership of the
// buffer; confirm against torch_api.h (a fix would need C.free/stdlib.h, which
// must come from the generator, since this file is machine-generated).
func AtgFromFile(ptr *Ctensor, filename string, shared int32, size int64, optionsKind int32, optionsDevice int32){


cfilename := C.CString(filename)


filenameLen := len(filename)


cfilenameLen := *(*C.int)(unsafe.Pointer(&filenameLen))


cshared := *(*C.int)(unsafe.Pointer(&shared))


csize := *(*C.int64_t)(unsafe.Pointer(&size))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_from_file(ptr, cfilename, cfilenameLen, cshared, csize, coptionsKind, coptionsDevice)


}
|
|
// full / full_like / gather bindings. optionsKind/optionsDevice encode the
// tensor dtype and device as plain ints; Cscalar values (fillValue) are
// forwarded untouched.
func AtgFull(ptr *Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){


csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))


csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_full(ptr, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice)


}


func AtgFullLike(ptr *Ctensor, self Ctensor, fillValue Cscalar){


C.atg_full_like(ptr, self, fillValue )


}


func AtgFullOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar){


csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))


csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))


C.atg_full_out(ptr, out, csizeDataPtr, csizeLen, fillValue )


}


func AtgGather(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad))


C.atg_gather(ptr, self, cdim, index, csparseGrad)


}


func AtgGatherOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad))


C.atg_gather_out(ptr, out, self, cdim, index, csparseGrad)


}
|
|
// ge comparison family (scalar and tensor variants, in-place "_" and "out"
// forms), plus gelu, geometric_, geqrf and ger bindings — all direct
// pass-throughs to the C functions of the same name.
func AtgGe(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_ge(ptr, self, other )


}


func AtgGe1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_ge1(ptr, self, other)


}


func AtgGe_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_ge_(ptr, self, other )


}


func AtgGe1_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_ge_1(ptr, self, other)


}


func AtgGeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){


C.atg_ge_out(ptr, out, self, other )


}


func AtgGeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_ge_out1(ptr, out, self, other)


}


func AtgGelu(ptr *Ctensor, self Ctensor){


C.atg_gelu(ptr, self)


}


func AtgGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor){


C.atg_gelu_backward(ptr, grad, self)


}


// AtgGeometric_ reinterprets the float64 probability p as C.double.
func AtgGeometric_(ptr *Ctensor, self Ctensor, p float64){


cp := *(*C.double)(unsafe.Pointer(&p))


C.atg_geometric_(ptr, self, cp)


}


func AtgGeqrf(ptr *Ctensor, self Ctensor){


C.atg_geqrf(ptr, self)


}


func AtgGeqrfOut(ptr *Ctensor, a Ctensor, tau Ctensor, self Ctensor){


C.atg_geqrf_out(ptr, a, tau, self)


}


func AtgGer(ptr *Ctensor, self Ctensor, vec2 Ctensor){


C.atg_ger(ptr, self, vec2)


}


func AtgGerOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor){


C.atg_ger_out(ptr, out, self, vec2)


}
|
|
// glu bindings (forward/backward and their "out" variants) plus grad. The
// int64 dim argument is reinterpreted as C.int64_t via unsafe.Pointer, which
// is a same-width bit copy.
func AtgGlu(ptr *Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_glu(ptr, self, cdim)


}


func AtgGluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_glu_backward(ptr, gradOutput, self, cdim)


}


func AtgGluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_glu_backward_out(ptr, gradInput, gradOutput, self, cdim)


}


func AtgGluOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_glu_out(ptr, out, self, cdim)


}


func AtgGrad(ptr *Ctensor, self Ctensor){


C.atg_grad(ptr, self)


}
|
|
// grid_sampler family and group_norm bindings. interpolationMode/paddingMode
// are passed as int64 enums; alignCorners and cudnnEnabled are int32 flags
// reinterpreted as C.int.
func AtgGridSampler(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){


cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))


cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))


calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))


C.atg_grid_sampler(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)


}


func AtgGridSampler2d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){


cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))


cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))


calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))


C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)


}


func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){


cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))


cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))


calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))


C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)


}


func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){


cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))


cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))


calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))


C.atg_grid_sampler_3d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)


}


func AtgGridSampler3dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){


cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))


cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))


calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))


C.atg_grid_sampler_3d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)


}


func AtgGroupNorm(ptr *Ctensor, input Ctensor, numGroups int64, weight Ctensor, bias Ctensor, eps float64, cudnnEnabled int32){


cnumGroups := *(*C.int64_t)(unsafe.Pointer(&numGroups))


ceps := *(*C.double)(unsafe.Pointer(&eps))


ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))


C.atg_group_norm(ptr, input, cnumGroups, weight, bias, ceps, ccudnnEnabled)


}
|
|
// gru bindings. paramsData ([]Ctensor) is passed as a pointer to its first
// element plus a length — assumes a non-empty slice (&paramsData[0] panics on
// an empty one). All int32 flags are reinterpreted as C.int booleans.
func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){


cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))


cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))


chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))


cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))


cdropout := *(*C.double)(unsafe.Pointer(&dropout))


ctrain := *(*C.int)(unsafe.Pointer(&train))


cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))


cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))


C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)


}


func AtgGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){


cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))


cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))


chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))


cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))


cdropout := *(*C.double)(unsafe.Pointer(&dropout))


ctrain := *(*C.int)(unsafe.Pointer(&train))


cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))


C.atg_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)


}


func AtgGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){


C.atg_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh)


}
|
|
// gt comparison family plus the hamming_window constructor variants
// (windowLength / periodic / alpha / beta plus dtype-and-device options).
func AtgGt(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_gt(ptr, self, other )


}


func AtgGt1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_gt1(ptr, self, other)


}


func AtgGt_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_gt_(ptr, self, other )


}


func AtgGt1_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_gt_1(ptr, self, other)


}


func AtgGtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){


C.atg_gt_out(ptr, out, self, other )


}


func AtgGtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_gt_out1(ptr, out, self, other)


}


func AtgHammingWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){


cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_hamming_window(ptr, cwindowLength, coptionsKind, coptionsDevice)


}


func AtgHammingWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){


cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))


cperiodic := *(*C.int)(unsafe.Pointer(&periodic))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_hamming_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice)


}


func AtgHammingWindow2(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, optionsKind int32, optionsDevice int32){


cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))


cperiodic := *(*C.int)(unsafe.Pointer(&periodic))


calpha := *(*C.double)(unsafe.Pointer(&alpha))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_hamming_window2(ptr, cwindowLength, cperiodic, calpha, coptionsKind, coptionsDevice)


}


func AtgHammingWindow3(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, beta float64, optionsKind int32, optionsDevice int32){


cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))


cperiodic := *(*C.int)(unsafe.Pointer(&periodic))


calpha := *(*C.double)(unsafe.Pointer(&alpha))


cbeta := *(*C.double)(unsafe.Pointer(&beta))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_hamming_window3(ptr, cwindowLength, cperiodic, calpha, cbeta, coptionsKind, coptionsDevice)


}
|
|
// hann_window constructors and the hardshrink / hardsigmoid / hardtanh
// activation families (forward, in-place "_", backward, and "out" variants).
func AtgHannWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){


cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_hann_window(ptr, cwindowLength, coptionsKind, coptionsDevice)


}


func AtgHannWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){


cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))


cperiodic := *(*C.int)(unsafe.Pointer(&periodic))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_hann_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice)


}


func AtgHardshrink(ptr *Ctensor, self Ctensor){


C.atg_hardshrink(ptr, self)


}


func AtgHardshrinkBackward(ptr *Ctensor, gradOut Ctensor, self Ctensor, lambd Cscalar){


C.atg_hardshrink_backward(ptr, gradOut, self, lambd )


}


func AtgHardsigmoid(ptr *Ctensor, self Ctensor){


C.atg_hardsigmoid(ptr, self)


}


func AtgHardsigmoid_(ptr *Ctensor, self Ctensor){


C.atg_hardsigmoid_(ptr, self)


}


func AtgHardsigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){


C.atg_hardsigmoid_backward(ptr, gradOutput, self)


}


func AtgHardsigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_hardsigmoid_out(ptr, out, self)


}


func AtgHardtanh(ptr *Ctensor, self Ctensor){


C.atg_hardtanh(ptr, self)


}


func AtgHardtanh_(ptr *Ctensor, self Ctensor){


C.atg_hardtanh_(ptr, self)


}


func AtgHardtanhBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){


C.atg_hardtanh_backward(ptr, gradOutput, self, minVal , maxVal )


}


func AtgHardtanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){


C.atg_hardtanh_backward_out(ptr, gradInput, gradOutput, self, minVal , maxVal )


}


func AtgHardtanhOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_hardtanh_out(ptr, out, self)


}
|
|
// hinge_embedding_loss, histc, hspmm and ifft bindings. reduction is an int64
// enum; margin is a C.double; bins/signalNdim are C.int64_t.
func AtgHingeEmbeddingLoss(ptr *Ctensor, self Ctensor, target Ctensor, margin float64, reduction int64){


cmargin := *(*C.double)(unsafe.Pointer(&margin))


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_hinge_embedding_loss(ptr, self, target, cmargin, creduction)


}


func AtgHistc(ptr *Ctensor, self Ctensor, bins int64){


cbins := *(*C.int64_t)(unsafe.Pointer(&bins))


C.atg_histc(ptr, self, cbins)


}


func AtgHistcOut(ptr *Ctensor, out Ctensor, self Ctensor, bins int64){


cbins := *(*C.int64_t)(unsafe.Pointer(&bins))


C.atg_histc_out(ptr, out, self, cbins)


}


func AtgHspmm(ptr *Ctensor, mat1 Ctensor, mat2 Ctensor){


C.atg_hspmm(ptr, mat1, mat2)


}


func AtgHspmmOut(ptr *Ctensor, out Ctensor, mat1 Ctensor, mat2 Ctensor){


C.atg_hspmm_out(ptr, out, mat1, mat2)


}


func AtgIfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){


csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim))


cnormalized := *(*C.int)(unsafe.Pointer(&normalized))


C.atg_ifft(ptr, self, csignalNdim, cnormalized)


}
|
|
// im2col family. Four parallel []int64/len pairs (kernel size, dilation,
// padding, stride — plus input size for the backward forms) are each
// marshalled as a first-element pointer and a C.int length; the slices must
// be non-empty or &s[0] panics.
func AtgIm2col(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


C.atg_im2col(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)


}


func AtgIm2colBackward(ptr *Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){


cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))


cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


C.atg_im2col_backward(ptr, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)


}


func AtgIm2colBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){


cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))


cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


C.atg_im2col_backward_out(ptr, gradInput, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)


}


func AtgIm2colOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


C.atg_im2col_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen)


}
|
|
// imag plus the index family (index, index_add, index_copy, index_fill,
// index_put, index_select, indices). []Ctensor index lists are passed as a
// first-element pointer and a C.int length.
func AtgImag(ptr *Ctensor, self Ctensor){


C.atg_imag(ptr, self)


}


func AtgIndex(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int){


cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0]))


cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen))


C.atg_index(ptr, self, cindicesDataPtr, cindicesLen)


}


func AtgIndexAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_add(ptr, self, cdim, index, source)


}


func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_add_(ptr, self, cdim, index, source)


}


func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_copy(ptr, self, cdim, index, source)


}


func AtgIndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_copy_(ptr, self, cdim, index, source)


}


func AtgIndexFill(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_fill(ptr, self, cdim, index, value )


}


func AtgIndexFill1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_fill1(ptr, self, cdim, index, value)


}


func AtgIndexFill_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_fill_(ptr, self, cdim, index, value )


}


func AtgIndexFill1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_fill_1(ptr, self, cdim, index, value)


}


func AtgIndexPut(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){


cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0]))


cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen))


caccumulate := *(*C.int)(unsafe.Pointer(&accumulate))


C.atg_index_put(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate)


}


func AtgIndexPut_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){


cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0]))


cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen))


caccumulate := *(*C.int)(unsafe.Pointer(&accumulate))


C.atg_index_put_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate)


}


func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_select(ptr, self, cdim, index)


}


func AtgIndexSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


C.atg_index_select_out(ptr, out, self, cdim, index)


}


func AtgIndices(ptr *Ctensor, self Ctensor){


C.atg_indices(ptr, self)


}
|
|
// instance_norm, int_repr, inverse, irfft, isclose and the is* predicates.
// float64 parameters (momentum, eps, rtol, atol) are bit-reinterpreted as
// C.double; int32 flags become C.int.
func AtgInstanceNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, useInputStats int32, momentum float64, eps float64, cudnnEnabled int32){


cuseInputStats := *(*C.int)(unsafe.Pointer(&useInputStats))


cmomentum := *(*C.double)(unsafe.Pointer(&momentum))


ceps := *(*C.double)(unsafe.Pointer(&eps))


ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))


C.atg_instance_norm(ptr, input, weight, bias, runningMean, runningVar, cuseInputStats, cmomentum, ceps, ccudnnEnabled)


}


func AtgIntRepr(ptr *Ctensor, self Ctensor){


C.atg_int_repr(ptr, self)


}


func AtgInverse(ptr *Ctensor, self Ctensor){


C.atg_inverse(ptr, self)


}


func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_inverse_out(ptr, out, self)


}


func AtgIrfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32, signalSizesData []int64, signalSizesLen int){


csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim))


cnormalized := *(*C.int)(unsafe.Pointer(&normalized))


conesided := *(*C.int)(unsafe.Pointer(&onesided))


csignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&signalSizesData[0]))


csignalSizesLen := *(*C.int)(unsafe.Pointer(&signalSizesLen))


C.atg_irfft(ptr, self, csignalNdim, cnormalized, conesided, csignalSizesDataPtr, csignalSizesLen)


}


func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32){


crtol := *(*C.double)(unsafe.Pointer(&rtol))


catol := *(*C.double)(unsafe.Pointer(&atol))


cequalNan := *(*C.int)(unsafe.Pointer(&equalNan))


C.atg_isclose(ptr, self, other, crtol, catol, cequalNan)


}


func AtgIsfinite(ptr *Ctensor, self Ctensor){


C.atg_isfinite(ptr, self)


}


func AtgIsinf(ptr *Ctensor, self Ctensor){


C.atg_isinf(ptr, self)


}


func AtgIsnan(ptr *Ctensor, self Ctensor){


C.atg_isnan(ptr, self)


}
|
|
// kl_div, kthvalue and l1_loss bindings. reduction is an int64 enum passed
// through as C.int64_t; keepdim is an int32 flag.
func AtgKlDiv(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_kl_div(ptr, self, target, creduction)


}


func AtgKlDivBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_kl_div_backward(ptr, gradOutput, self, target, creduction)


}


func AtgKthvalue(ptr *Ctensor, self Ctensor, k int64, dim int64, keepdim int32){


ck := *(*C.int64_t)(unsafe.Pointer(&k))


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_kthvalue(ptr, self, ck, cdim, ckeepdim)


}


func AtgKthvalueOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, keepdim int32){


ck := *(*C.int64_t)(unsafe.Pointer(&k))


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_kthvalue_out(ptr, values, indices, self, ck, cdim, ckeepdim)


}


func AtgL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_l1_loss(ptr, self, target, creduction)


}


func AtgL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_l1_loss_backward(ptr, gradOutput, self, target, creduction)


}


func AtgL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction)


}


func AtgL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_l1_loss_out(ptr, out, self, target, creduction)


}
|
|
// layer_norm and the le comparison family. normalizedShapeData is marshalled
// as a first-element pointer plus a C.int length (slice must be non-empty).
func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int32){


cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0]))


cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen))


ceps := *(*C.double)(unsafe.Pointer(&eps))


ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable))


C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias, ceps, ccudnnEnable)


}


func AtgLe(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_le(ptr, self, other )


}


func AtgLe1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_le1(ptr, self, other)


}


func AtgLe_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_le_(ptr, self, other )


}


func AtgLe1_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_le_1(ptr, self, other)


}


func AtgLeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){


C.atg_le_out(ptr, out, self, other )


}


func AtgLeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_le_out1(ptr, out, self, other)


}
|
|
// AtgLeakyRelu wraps the C binding atg_leaky_relu.
func AtgLeakyRelu(ptr *Ctensor, self Ctensor){


C.atg_leaky_relu(ptr, self)


}


// AtgLeakyRelu_ wraps the C binding atg_leaky_relu_.
func AtgLeakyRelu_(ptr *Ctensor, self Ctensor){


C.atg_leaky_relu_(ptr, self)


}


// AtgLeakyReluBackward wraps the C binding atg_leaky_relu_backward.
func AtgLeakyReluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, negativeSlope Cscalar, selfIsResult int32){


// Reinterpret the Go int32 flag as C.int.
cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult))


C.atg_leaky_relu_backward(ptr, gradOutput, self, negativeSlope , cselfIsResult)


}


// AtgLeakyReluOut wraps the C binding atg_leaky_relu_out (out-tensor variant).
func AtgLeakyReluOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_leaky_relu_out(ptr, out, self)


}
|
|
// AtgLerp wraps the C binding atg_lerp (scalar-weight variant).
func AtgLerp(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){


C.atg_lerp(ptr, self, end, weight )


}


// AtgLerp1 wraps the C binding atg_lerp1 (tensor-weight variant).
func AtgLerp1(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){


C.atg_lerp1(ptr, self, end, weight)


}


// AtgLerp_ wraps the C binding atg_lerp_ (scalar-weight variant).
func AtgLerp_(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){


C.atg_lerp_(ptr, self, end, weight )


}


// AtgLerp1_ wraps the C binding atg_lerp_1 (tensor-weight variant).
func AtgLerp1_(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){


C.atg_lerp_1(ptr, self, end, weight)


}


// AtgLerpOut wraps the C binding atg_lerp_out (scalar-weight, out-tensor variant).
func AtgLerpOut(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Cscalar){


C.atg_lerp_out(ptr, out, self, end, weight )


}


// AtgLerpOut1 wraps the C binding atg_lerp_out1 (tensor-weight, out-tensor variant).
func AtgLerpOut1(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Ctensor){


C.atg_lerp_out1(ptr, out, self, end, weight)


}
|
|
// AtgLgamma wraps the C binding atg_lgamma.
func AtgLgamma(ptr *Ctensor, self Ctensor){


C.atg_lgamma(ptr, self)


}


// AtgLgamma_ wraps the C binding atg_lgamma_.
func AtgLgamma_(ptr *Ctensor, self Ctensor){


C.atg_lgamma_(ptr, self)


}


// AtgLgammaOut wraps the C binding atg_lgamma_out (out-tensor variant).
func AtgLgammaOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_lgamma_out(ptr, out, self)


}
|
|
// AtgLinear wraps the C binding atg_linear.
func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){


C.atg_linear(ptr, input, weight, bias)


}
|
|
// AtgLinspace wraps the C binding atg_linspace. optionsKind/optionsDevice are
// the integer codes this API uses for tensor dtype and device options.
func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, optionsKind int32, optionsDevice int32){


csteps := *(*C.int64_t)(unsafe.Pointer(&steps))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_linspace(ptr, start , end , csteps, coptionsKind, coptionsDevice)


}


// AtgLinspaceOut wraps the C binding atg_linspace_out (out-tensor variant).
func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64){


csteps := *(*C.int64_t)(unsafe.Pointer(&steps))


C.atg_linspace_out(ptr, out, start , end , csteps)


}
|
|
// AtgLog wraps the C binding atg_log.
func AtgLog(ptr *Ctensor, self Ctensor){


C.atg_log(ptr, self)


}


// AtgLog10 wraps the C binding atg_log10.
func AtgLog10(ptr *Ctensor, self Ctensor){


C.atg_log10(ptr, self)


}


// AtgLog10_ wraps the C binding atg_log10_.
func AtgLog10_(ptr *Ctensor, self Ctensor){


C.atg_log10_(ptr, self)


}


// AtgLog10Out wraps the C binding atg_log10_out (out-tensor variant).
func AtgLog10Out(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_log10_out(ptr, out, self)


}


// AtgLog1p wraps the C binding atg_log1p.
func AtgLog1p(ptr *Ctensor, self Ctensor){


C.atg_log1p(ptr, self)


}


// AtgLog1p_ wraps the C binding atg_log1p_.
func AtgLog1p_(ptr *Ctensor, self Ctensor){


C.atg_log1p_(ptr, self)


}


// AtgLog1pOut wraps the C binding atg_log1p_out (out-tensor variant).
func AtgLog1pOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_log1p_out(ptr, out, self)


}


// AtgLog2 wraps the C binding atg_log2.
func AtgLog2(ptr *Ctensor, self Ctensor){


C.atg_log2(ptr, self)


}


// AtgLog2_ wraps the C binding atg_log2_.
func AtgLog2_(ptr *Ctensor, self Ctensor){


C.atg_log2_(ptr, self)


}


// AtgLog2Out wraps the C binding atg_log2_out (out-tensor variant).
func AtgLog2Out(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_log2_out(ptr, out, self)


}


// AtgLog_ wraps the C binding atg_log_.
func AtgLog_(ptr *Ctensor, self Ctensor){


C.atg_log_(ptr, self)


}
|
|
// AtgLogNormal_ wraps the C binding atg_log_normal_ with float64 mean/std
// parameters passed through as C doubles.
func AtgLogNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){


cmean := *(*C.double)(unsafe.Pointer(&mean))


cstd := *(*C.double)(unsafe.Pointer(&std))


C.atg_log_normal_(ptr, self, cmean, cstd)


}
|
|
// AtgLogOut wraps the C binding atg_log_out (out-tensor variant).
func AtgLogOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_log_out(ptr, out, self)


}


// AtgLogSigmoid wraps the C binding atg_log_sigmoid.
func AtgLogSigmoid(ptr *Ctensor, self Ctensor){


C.atg_log_sigmoid(ptr, self)


}


// AtgLogSigmoidBackward wraps the C binding atg_log_sigmoid_backward.
func AtgLogSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){


C.atg_log_sigmoid_backward(ptr, gradOutput, self, buffer)


}


// AtgLogSigmoidBackwardOut wraps the C binding atg_log_sigmoid_backward_out.
func AtgLogSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){


C.atg_log_sigmoid_backward_out(ptr, gradInput, gradOutput, self, buffer)


}


// AtgLogSigmoidOut wraps the C binding atg_log_sigmoid_out (out-tensor variant).
func AtgLogSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_log_sigmoid_out(ptr, out, self)


}
|
|
// AtgLogSoftmax wraps the C binding atg_log_softmax. dtype is the integer
// dtype code used throughout this API.
func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_log_softmax(ptr, self, cdim, cdtype)


}


// AtgLogdet wraps the C binding atg_logdet.
func AtgLogdet(ptr *Ctensor, self Ctensor){


C.atg_logdet(ptr, self)


}
|
|
// AtgLogicalAnd wraps the C binding atg_logical_and.
func AtgLogicalAnd(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_logical_and(ptr, self, other)


}


// AtgLogicalAnd_ wraps the C binding atg_logical_and_.
func AtgLogicalAnd_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_logical_and_(ptr, self, other)


}


// AtgLogicalAndOut wraps the C binding atg_logical_and_out (out-tensor variant).
func AtgLogicalAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_logical_and_out(ptr, out, self, other)


}


// AtgLogicalNot wraps the C binding atg_logical_not.
func AtgLogicalNot(ptr *Ctensor, self Ctensor){


C.atg_logical_not(ptr, self)


}


// AtgLogicalNot_ wraps the C binding atg_logical_not_.
func AtgLogicalNot_(ptr *Ctensor, self Ctensor){


C.atg_logical_not_(ptr, self)


}


// AtgLogicalNotOut wraps the C binding atg_logical_not_out (out-tensor variant).
func AtgLogicalNotOut(ptr *Ctensor, out Ctensor, self Ctensor){


C.atg_logical_not_out(ptr, out, self)


}


// AtgLogicalOr wraps the C binding atg_logical_or.
func AtgLogicalOr(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_logical_or(ptr, self, other)


}


// AtgLogicalOr_ wraps the C binding atg_logical_or_.
func AtgLogicalOr_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_logical_or_(ptr, self, other)


}


// AtgLogicalOrOut wraps the C binding atg_logical_or_out (out-tensor variant).
func AtgLogicalOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_logical_or_out(ptr, out, self, other)


}


// AtgLogicalXor wraps the C binding atg_logical_xor.
func AtgLogicalXor(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_logical_xor(ptr, self, other)


}


// AtgLogicalXor_ wraps the C binding atg_logical_xor_.
func AtgLogicalXor_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_logical_xor_(ptr, self, other)


}


// AtgLogicalXorOut wraps the C binding atg_logical_xor_out (out-tensor variant).
func AtgLogicalXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_logical_xor_out(ptr, out, self, other)


}
|
|
// AtgLogspace wraps the C binding atg_logspace. optionsKind/optionsDevice are
// the integer dtype and device codes used throughout this API.
func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, base float64, optionsKind int32, optionsDevice int32){


csteps := *(*C.int64_t)(unsafe.Pointer(&steps))


cbase := *(*C.double)(unsafe.Pointer(&base))


coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))


coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))


C.atg_logspace(ptr, start , end , csteps, cbase, coptionsKind, coptionsDevice)


}


// AtgLogspaceOut wraps the C binding atg_logspace_out (out-tensor variant).
func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64, base float64){


csteps := *(*C.int64_t)(unsafe.Pointer(&steps))


cbase := *(*C.double)(unsafe.Pointer(&base))


C.atg_logspace_out(ptr, out, start , end , csteps, cbase)


}
|
|
// AtgLogsumexp wraps the C binding atg_logsumexp. dimData/dimLen are passed
// to C as an int64 pointer plus element count.
func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_logsumexp(ptr, self, cdimDataPtr, cdimLen, ckeepdim)


}


// AtgLogsumexpOut wraps the C binding atg_logsumexp_out (out-tensor variant).
func AtgLogsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_logsumexp_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim)


}
|
|
// AtgLstm wraps the C binding atg_lstm. Tensor slices (hxData, paramsData)
// are passed to C as a pointer to their first element plus an explicit length.
func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){


chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))


chxLen := *(*C.int)(unsafe.Pointer(&hxLen))


cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))


cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))


chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))


cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))


cdropout := *(*C.double)(unsafe.Pointer(&dropout))


ctrain := *(*C.int)(unsafe.Pointer(&train))


cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))


cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))


C.atg_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)


}


// AtgLstm1 wraps the C binding atg_lstm1 (packed-sequence variant taking
// data/batchSizes instead of a padded input plus batchFirst flag).
func AtgLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){


chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))


chxLen := *(*C.int)(unsafe.Pointer(&hxLen))


cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))


cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))


chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))


cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))


cdropout := *(*C.double)(unsafe.Pointer(&dropout))


ctrain := *(*C.int)(unsafe.Pointer(&train))


cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))


C.atg_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)


}


// AtgLstmCell wraps the C binding atg_lstm_cell.
func AtgLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){


chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))


chxLen := *(*C.int)(unsafe.Pointer(&hxLen))


C.atg_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh)


}
|
|
// AtgLstsq wraps the C binding atg_lstsq.
func AtgLstsq(ptr *Ctensor, self Ctensor, a Ctensor){


C.atg_lstsq(ptr, self, a)


}


// AtgLstsqOut wraps the C binding atg_lstsq_out (x/qr out-tensor variant).
func AtgLstsqOut(ptr *Ctensor, x Ctensor, qr Ctensor, self Ctensor, a Ctensor){


C.atg_lstsq_out(ptr, x, qr, self, a)


}
|
|
// AtgLt wraps the C binding atg_lt (scalar-other variant).
func AtgLt(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_lt(ptr, self, other )


}


// AtgLt1 wraps the C binding atg_lt1 (tensor-other variant).
func AtgLt1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_lt1(ptr, self, other)


}


// AtgLt_ wraps the C binding atg_lt_ (scalar-other variant).
func AtgLt_(ptr *Ctensor, self Ctensor, other Cscalar){


C.atg_lt_(ptr, self, other )


}


// AtgLt1_ wraps the C binding atg_lt_1 (tensor-other variant).
func AtgLt1_(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_lt_1(ptr, self, other)


}


// AtgLtOut wraps the C binding atg_lt_out (scalar-other, out-tensor variant).
func AtgLtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){


C.atg_lt_out(ptr, out, self, other )


}


// AtgLtOut1 wraps the C binding atg_lt_out1 (tensor-other, out-tensor variant).
func AtgLtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_lt_out1(ptr, out, self, other)


}
|
|
// AtgLuSolve wraps the C binding atg_lu_solve.
func AtgLuSolve(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){


C.atg_lu_solve(ptr, self, lUData, lUPivots)


}


// AtgLuSolveOut wraps the C binding atg_lu_solve_out (out-tensor variant).
func AtgLuSolveOut(ptr *Ctensor, out Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){


C.atg_lu_solve_out(ptr, out, self, lUData, lUPivots)


}
|
|
// AtgMarginRankingLoss wraps the C binding atg_margin_ranking_loss.
func AtgMarginRankingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){


cmargin := *(*C.double)(unsafe.Pointer(&margin))


creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))


C.atg_margin_ranking_loss(ptr, input1, input2, target, cmargin, creduction)


}
|
|
// AtgMaskedFill wraps the C binding atg_masked_fill (scalar-value variant).
func AtgMaskedFill(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){


C.atg_masked_fill(ptr, self, mask, value )


}


// AtgMaskedFill1 wraps the C binding atg_masked_fill1 (tensor-value variant).
func AtgMaskedFill1(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){


C.atg_masked_fill1(ptr, self, mask, value)


}


// AtgMaskedFill_ wraps the C binding atg_masked_fill_ (scalar-value variant).
func AtgMaskedFill_(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){


C.atg_masked_fill_(ptr, self, mask, value )


}


// AtgMaskedFill1_ wraps the C binding atg_masked_fill_1 (tensor-value variant).
func AtgMaskedFill1_(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){


C.atg_masked_fill_1(ptr, self, mask, value)


}
|
|
// AtgMaskedScatter wraps the C binding atg_masked_scatter.
func AtgMaskedScatter(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){


C.atg_masked_scatter(ptr, self, mask, source)


}


// AtgMaskedScatter_ wraps the C binding atg_masked_scatter_.
func AtgMaskedScatter_(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){


C.atg_masked_scatter_(ptr, self, mask, source)


}


// AtgMaskedSelect wraps the C binding atg_masked_select.
func AtgMaskedSelect(ptr *Ctensor, self Ctensor, mask Ctensor){


C.atg_masked_select(ptr, self, mask)


}


// AtgMaskedSelectOut wraps the C binding atg_masked_select_out (out-tensor variant).
func AtgMaskedSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, mask Ctensor){


C.atg_masked_select_out(ptr, out, self, mask)


}
|
|
// AtgMatmul wraps the C binding atg_matmul.
func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_matmul(ptr, self, other)


}


// AtgMatmulOut wraps the C binding atg_matmul_out (out-tensor variant).
func AtgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_matmul_out(ptr, out, self, other)


}


// AtgMatrixPower wraps the C binding atg_matrix_power.
func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64){


cn := *(*C.int64_t)(unsafe.Pointer(&n))


C.atg_matrix_power(ptr, self, cn)


}
|
|
// AtgMatrixRank wraps the C binding atg_matrix_rank.
func AtgMatrixRank(ptr *Ctensor, self Ctensor, symmetric int32){


csymmetric := *(*C.int)(unsafe.Pointer(&symmetric))


C.atg_matrix_rank(ptr, self, csymmetric)


}


// AtgMatrixRank1 wraps the C binding atg_matrix_rank1 (explicit-tolerance variant).
func AtgMatrixRank1(ptr *Ctensor, self Ctensor, tol float64, symmetric int32){


ctol := *(*C.double)(unsafe.Pointer(&tol))


csymmetric := *(*C.int)(unsafe.Pointer(&symmetric))


C.atg_matrix_rank1(ptr, self, ctol, csymmetric)


}
|
|
// AtgMax wraps the C binding atg_max (whole-tensor variant).
func AtgMax(ptr *Ctensor, self Ctensor){


C.atg_max(ptr, self)


}


// AtgMax1 wraps the C binding atg_max1 (elementwise two-tensor variant).
func AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_max1(ptr, self, other)


}


// AtgMax2 wraps the C binding atg_max2 (dim/keepdim variant).
func AtgMax2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_max2(ptr, self, cdim, ckeepdim)


}


// AtgMaxOut wraps the C binding atg_max_out (elementwise, out-tensor variant).
func AtgMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_max_out(ptr, out, self, other)


}


// AtgMaxOut1 wraps the C binding atg_max_out1 (dim/keepdim, max/maxValues out variant).
func AtgMaxOut1(ptr *Ctensor, max Ctensor, maxValues Ctensor, self Ctensor, dim int64, keepdim int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_max_out1(ptr, max, maxValues, self, cdim, ckeepdim)


}
|
|
// AtgMaxPool1d wraps the C binding atg_max_pool1d. Each int64 slice argument
// is forwarded to C as a pointer to its first element plus an explicit length.
func AtgMaxPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}


// AtgMaxPool1dWithIndices wraps the C binding atg_max_pool1d_with_indices.
func AtgMaxPool1dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool1d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}
|
|
// AtgMaxPool2d wraps the C binding atg_max_pool2d. Each int64 slice argument
// is forwarded to C as a pointer to its first element plus an explicit length.
func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}


// AtgMaxPool2dWithIndices wraps the C binding atg_max_pool2d_with_indices.
func AtgMaxPool2dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool2d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}


// AtgMaxPool2dWithIndicesBackward wraps the C binding
// atg_max_pool2d_with_indices_backward.
func AtgMaxPool2dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool2d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices)


}


// AtgMaxPool2dWithIndicesBackwardOut wraps the C binding
// atg_max_pool2d_with_indices_backward_out (gradInput out-tensor variant).
func AtgMaxPool2dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool2d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices)


}


// AtgMaxPool2dWithIndicesOut wraps the C binding
// atg_max_pool2d_with_indices_out (out/indices out-tensor variant).
func AtgMaxPool2dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool2d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}
|
|
// AtgMaxPool3d wraps the C binding atg_max_pool3d. Each int64 slice argument
// is forwarded to C as a pointer to its first element plus an explicit length.
func AtgMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}


// AtgMaxPool3dWithIndices wraps the C binding atg_max_pool3d_with_indices.
func AtgMaxPool3dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool3d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}


// AtgMaxPool3dWithIndicesBackward wraps the C binding
// atg_max_pool3d_with_indices_backward.
func AtgMaxPool3dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool3d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices)


}


// AtgMaxPool3dWithIndicesBackwardOut wraps the C binding
// atg_max_pool3d_with_indices_backward_out (gradInput out-tensor variant).
func AtgMaxPool3dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool3d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices)


}


// AtgMaxPool3dWithIndicesOut wraps the C binding
// atg_max_pool3d_with_indices_out (out/indices out-tensor variant).
func AtgMaxPool3dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){


ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))


ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))


cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))


cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))


C.atg_max_pool3d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)


}
|
|
// AtgMaxUnpool2d wraps the C binding atg_max_unpool2d.
func AtgMaxUnpool2d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


C.atg_max_unpool2d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen)


}


// AtgMaxUnpool2dBackward wraps the C binding atg_max_unpool2d_backward.
func AtgMaxUnpool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


C.atg_max_unpool2d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen)


}


// AtgMaxUnpool2dBackwardOut wraps the C binding atg_max_unpool2d_backward_out.
func AtgMaxUnpool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


C.atg_max_unpool2d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen)


}


// AtgMaxUnpool2dOut wraps the C binding atg_max_unpool2d_out (out-tensor variant).
func AtgMaxUnpool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


C.atg_max_unpool2d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen)


}
|
|
// AtgMaxUnpool3d wraps the C binding atg_max_unpool3d (unlike the 2d variant
// it also takes stride and padding arrays).
func AtgMaxUnpool3d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


C.atg_max_unpool3d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen)


}


// AtgMaxUnpool3dBackward wraps the C binding atg_max_unpool3d_backward.
func AtgMaxUnpool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


C.atg_max_unpool3d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen)


}


// AtgMaxUnpool3dBackwardOut wraps the C binding atg_max_unpool3d_backward_out.
func AtgMaxUnpool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


C.atg_max_unpool3d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen)


}


// AtgMaxUnpool3dOut wraps the C binding atg_max_unpool3d_out (out-tensor variant).
func AtgMaxUnpool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){


coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))


coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))


cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))


cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))


cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))


cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))


C.atg_max_unpool3d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen)


}
|
|
// AtgMaxValues wraps the C binding atg_max_values.
func AtgMaxValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_max_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim)


}
|
|
// AtgMean wraps the C binding atg_mean. dtype is the integer dtype code used
// throughout this API.
func AtgMean(ptr *Ctensor, self Ctensor, dtype int32){


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_mean(ptr, self, cdtype)


}


// AtgMean1 wraps the C binding atg_mean1 (dim/keepdim/dtype variant).
func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype)


}


// AtgMeanOut wraps the C binding atg_mean_out (out-tensor variant).
func AtgMeanOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){


cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))


cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


cdtype := *(*C.int)(unsafe.Pointer(&dtype))


C.atg_mean_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype)


}
|
|
// AtgMedian wraps the C binding atg_median (whole-tensor variant).
func AtgMedian(ptr *Ctensor, self Ctensor){


C.atg_median(ptr, self)


}


// AtgMedian1 wraps the C binding atg_median1 (dim/keepdim variant).
func AtgMedian1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_median1(ptr, self, cdim, ckeepdim)


}


// AtgMedianOut wraps the C binding atg_median_out (values/indices out variant).
func AtgMedianOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_median_out(ptr, values, indices, self, cdim, ckeepdim)


}
|
|
|
|
// AtgMin wraps the C binding atg_min (whole-tensor variant).
func AtgMin(ptr *Ctensor, self Ctensor){


C.atg_min(ptr, self)


}


// AtgMin1 wraps the C binding atg_min1 (elementwise two-tensor variant).
func AtgMin1(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_min1(ptr, self, other)


}


// AtgMin2 wraps the C binding atg_min2 (dim/keepdim variant).
func AtgMin2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_min2(ptr, self, cdim, ckeepdim)


}


// AtgMinOut wraps the C binding atg_min_out (elementwise, out-tensor variant).
func AtgMinOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){


C.atg_min_out(ptr, out, self, other)


}


// AtgMinOut1 wraps the C binding atg_min_out1 (dim/keepdim, min/minIndices out variant).
func AtgMinOut1(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){


cdim := *(*C.int64_t)(unsafe.Pointer(&dim))


ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))


C.atg_min_out1(ptr, min, minIndices, self, cdim, ckeepdim)


}
|
|
func AtgMinValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_min_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim)
|
|
}
|
|
func AtgMiopenBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor))
|
|
cepsilon := *(*C.double)(unsafe.Pointer(&epsilon))
|
|
C.atg_miopen_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon)
|
|
}
|
|
func AtgMiopenBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64){
|
|
cepsilon := *(*C.double)(unsafe.Pointer(&epsilon))
|
|
C.atg_miopen_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon)
|
|
}
|
|
func AtgMiopenConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor){
|
|
C.atg_miopen_convolution_backward_bias(ptr, gradOutput)
|
|
}
|
|
func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0]))
|
|
cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0]))
|
|
cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0]))
|
|
cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0]))
|
|
cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){
|
|
cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0]))
|
|
cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
|
|
cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
|
|
C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic)
|
|
}
|
|
func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){
|
|
cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0]))
|
|
cweightLen := *(*C.int)(unsafe.Pointer(&weightLen))
|
|
cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0))
|
|
cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
|
|
chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0]))
|
|
cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen))
|
|
C.atg_miopen_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState)
|
|
}
|
|
func AtgMkldnnAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
C.atg_mkldnn_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
|
|
}
|
|
func AtgMkldnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups)
|
|
}
|
|
func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){
|
|
cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0]))
|
|
cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined))
|
|
C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined)
|
|
}
|
|
func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){
|
|
cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0]))
|
|
cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined))
|
|
C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined)
|
|
}
|
|
func AtgMkldnnLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){
|
|
C.atg_mkldnn_linear(ptr, input, weight, bias)
|
|
}
|
|
func AtgMkldnnMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
C.atg_mkldnn_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)
|
|
}
|
|
func AtgMkldnnReorderConv2dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
|
|
C.atg_mkldnn_reorder_conv2d_weight(ptr, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups)
|
|
}
|
|
func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor){
|
|
C.atg_mm(ptr, self, mat2)
|
|
}
|
|
func AtgMmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){
|
|
C.atg_mm_out(ptr, out, self, mat2)
|
|
}
|
|
func AtgMode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_mode(ptr, self, cdim, ckeepdim)
|
|
}
|
|
func AtgModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_mode_out(ptr, values, indices, self, cdim, ckeepdim)
|
|
}
|
|
func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_mse_loss(ptr, self, target, creduction)
|
|
}
|
|
func AtgMseLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_mse_loss_backward(ptr, gradOutput, self, target, creduction)
|
|
}
|
|
func AtgMseLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_mse_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction)
|
|
}
|
|
func AtgMseLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_mse_loss_out(ptr, out, self, target, creduction)
|
|
}
|
|
func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_mul(ptr, self, other)
|
|
}
|
|
func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_mul1(ptr, self, other )
|
|
}
|
|
func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_mul_(ptr, self, other)
|
|
}
|
|
func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_mul_1(ptr, self, other )
|
|
}
|
|
func AtgMulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_mul_out(ptr, out, self, other)
|
|
}
|
|
func AtgMultiMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_multi_margin_loss_backward(ptr, gradOutput, self, target, p , margin , weight, creduction)
|
|
}
|
|
func AtgMultiMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_multi_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, p , margin , weight, creduction)
|
|
}
|
|
func AtgMultilabelMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_multilabel_margin_loss(ptr, self, target, creduction)
|
|
}
|
|
func AtgMultilabelMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_multilabel_margin_loss_backward(ptr, gradOutput, self, target, creduction, isTarget)
|
|
}
|
|
func AtgMultilabelMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_multilabel_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, isTarget)
|
|
}
|
|
func AtgMultilabelMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_multilabel_margin_loss_out(ptr, out, self, target, creduction)
|
|
}
|
|
func AtgMultinomial(ptr *Ctensor, self Ctensor, numSamples int64, replacement int32){
|
|
cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples))
|
|
creplacement := *(*C.int)(unsafe.Pointer(&replacement))
|
|
C.atg_multinomial(ptr, self, cnumSamples, creplacement)
|
|
}
|
|
func AtgMultinomialOut(ptr *Ctensor, out Ctensor, self Ctensor, numSamples int64, replacement int32){
|
|
cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples))
|
|
creplacement := *(*C.int)(unsafe.Pointer(&replacement))
|
|
C.atg_multinomial_out(ptr, out, self, cnumSamples, creplacement)
|
|
}
|
|
func AtgMv(ptr *Ctensor, self Ctensor, vec Ctensor){
|
|
C.atg_mv(ptr, self, vec)
|
|
}
|
|
func AtgMvOut(ptr *Ctensor, out Ctensor, self Ctensor, vec Ctensor){
|
|
C.atg_mv_out(ptr, out, self, vec)
|
|
}
|
|
func AtgMvlgamma(ptr *Ctensor, self Ctensor, p int64){
|
|
cp := *(*C.int64_t)(unsafe.Pointer(&p))
|
|
C.atg_mvlgamma(ptr, self, cp)
|
|
}
|
|
func AtgMvlgamma_(ptr *Ctensor, self Ctensor, p int64){
|
|
cp := *(*C.int64_t)(unsafe.Pointer(&p))
|
|
C.atg_mvlgamma_(ptr, self, cp)
|
|
}
|
|
func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cstart := *(*C.int64_t)(unsafe.Pointer(&start))
|
|
clength := *(*C.int64_t)(unsafe.Pointer(&length))
|
|
C.atg_narrow(ptr, self, cdim, cstart, clength)
|
|
}
|
|
func AtgNarrow1(ptr *Ctensor, self Ctensor, dim int64, start Ctensor, length int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
clength := *(*C.int64_t)(unsafe.Pointer(&length))
|
|
C.atg_narrow1(ptr, self, cdim, start, clength)
|
|
}
|
|
func AtgNarrowCopy(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cstart := *(*C.int64_t)(unsafe.Pointer(&start))
|
|
clength := *(*C.int64_t)(unsafe.Pointer(&length))
|
|
C.atg_narrow_copy(ptr, self, cdim, cstart, clength)
|
|
}
|
|
func AtgNativeBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
cmomentum := *(*C.double)(unsafe.Pointer(&momentum))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_native_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps)
|
|
}
|
|
func AtgNativeBatchNormOut(ptr *Ctensor, out Ctensor, saveMean Ctensor, saveInvstd Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
cmomentum := *(*C.double)(unsafe.Pointer(&momentum))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps)
|
|
}
|
|
func AtgNativeLayerNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, m int64, n int64, eps float64){
|
|
cm := *(*C.int64_t)(unsafe.Pointer(&m))
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
C.atg_native_layer_norm(ptr, input, weight, bias, cm, cn, ceps)
|
|
}
|
|
func AtgNativeNorm(ptr *Ctensor, self Ctensor){
|
|
C.atg_native_norm(ptr, self)
|
|
}
|
|
func AtgNe(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_ne(ptr, self, other )
|
|
}
|
|
func AtgNe1(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_ne1(ptr, self, other)
|
|
}
|
|
func AtgNe_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_ne_(ptr, self, other )
|
|
}
|
|
func AtgNe1_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_ne_1(ptr, self, other)
|
|
}
|
|
func AtgNeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_ne_out(ptr, out, self, other )
|
|
}
|
|
func AtgNeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_ne_out1(ptr, out, self, other)
|
|
}
|
|
func AtgNeg(ptr *Ctensor, self Ctensor){
|
|
C.atg_neg(ptr, self)
|
|
}
|
|
func AtgNeg_(ptr *Ctensor, self Ctensor){
|
|
C.atg_neg_(ptr, self)
|
|
}
|
|
func AtgNegOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_neg_out(ptr, out, self)
|
|
}
|
|
func AtgNewEmpty(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_new_empty(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgNewFull(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_new_full(ptr, self, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgNewZeros(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_new_zeros(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex)
|
|
}
|
|
func AtgNllLoss2d(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss2d(ptr, self, target, weight, creduction, cignoreIndex)
|
|
}
|
|
func AtgNllLoss2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss2d_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight)
|
|
}
|
|
func AtgNllLoss2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss2d_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight)
|
|
}
|
|
func AtgNllLoss2dOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss2d_out(ptr, out, self, target, weight, creduction, cignoreIndex)
|
|
}
|
|
func AtgNllLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight)
|
|
}
|
|
func AtgNllLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight)
|
|
}
|
|
func AtgNllLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex))
|
|
C.atg_nll_loss_out(ptr, out, self, target, weight, creduction, cignoreIndex)
|
|
}
|
|
func AtgNonzero(ptr *Ctensor, self Ctensor){
|
|
C.atg_nonzero(ptr, self)
|
|
}
|
|
|
|
func AtgNonzeroOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_nonzero_out(ptr, out, self)
|
|
}
|
|
func AtgNorm(ptr *Ctensor, self Ctensor){
|
|
C.atg_norm(ptr, self)
|
|
}
|
|
func AtgNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dtype int32){
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_norm1(ptr, self, p , cdtype)
|
|
}
|
|
func AtgNorm2(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_norm2(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim)
|
|
}
|
|
func AtgNorm3(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_norm3(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype)
|
|
}
|
|
func AtgNormExceptDim(ptr *Ctensor, v Ctensor, pow int64, dim int64){
|
|
cpow := *(*C.int64_t)(unsafe.Pointer(&pow))
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_norm_except_dim(ptr, v, cpow, cdim)
|
|
}
|
|
func AtgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_norm_out(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim)
|
|
}
|
|
func AtgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_norm_out1(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype)
|
|
}
|
|
func AtgNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){
|
|
cmean := *(*C.double)(unsafe.Pointer(&mean))
|
|
cstd := *(*C.double)(unsafe.Pointer(&std))
|
|
C.atg_normal_(ptr, self, cmean, cstd)
|
|
}
|
|
func AtgNormalOut(ptr *Ctensor, out Ctensor, mean Ctensor, std float64){
|
|
cstd := *(*C.double)(unsafe.Pointer(&std))
|
|
C.atg_normal_out(ptr, out, mean, cstd)
|
|
}
|
|
func AtgNormalOut1(ptr *Ctensor, out Ctensor, mean float64, std Ctensor){
|
|
cmean := *(*C.double)(unsafe.Pointer(&mean))
|
|
C.atg_normal_out1(ptr, out, cmean, std)
|
|
}
|
|
func AtgNormalOut2(ptr *Ctensor, out Ctensor, mean Ctensor, std Ctensor){
|
|
C.atg_normal_out2(ptr, out, mean, std)
|
|
}
|
|
func AtgNormalOut3(ptr *Ctensor, out Ctensor, mean float64, std float64, sizeData []int64, sizeLen int){
|
|
cmean := *(*C.double)(unsafe.Pointer(&mean))
|
|
cstd := *(*C.double)(unsafe.Pointer(&std))
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_normal_out3(ptr, out, cmean, cstd, csizeDataPtr, csizeLen)
|
|
}
|
|
func AtgNuclearNorm(ptr *Ctensor, self Ctensor, keepdim int32){
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_nuclear_norm(ptr, self, ckeepdim)
|
|
}
|
|
func AtgNuclearNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_nuclear_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim)
|
|
}
|
|
func AtgNuclearNormOut(ptr *Ctensor, out Ctensor, self Ctensor, keepdim int32){
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_nuclear_norm_out(ptr, out, self, ckeepdim)
|
|
}
|
|
func AtgNuclearNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_nuclear_norm_out1(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim)
|
|
}
|
|
func AtgNumpyT(ptr *Ctensor, self Ctensor){
|
|
C.atg_numpy_t(ptr, self)
|
|
}
|
|
func AtgOneHot(ptr *Ctensor, self Ctensor, numClasses int64){
|
|
cnumClasses := *(*C.int64_t)(unsafe.Pointer(&numClasses))
|
|
C.atg_one_hot(ptr, self, cnumClasses)
|
|
}
|
|
func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgOnesLike(ptr *Ctensor, self Ctensor){
|
|
C.atg_ones_like(ptr, self)
|
|
}
|
|
func AtgOnesOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_ones_out(ptr, out, csizeDataPtr, csizeLen)
|
|
}
|
|
func AtgOrgqr(ptr *Ctensor, self Ctensor, input2 Ctensor){
|
|
C.atg_orgqr(ptr, self, input2)
|
|
}
|
|
func AtgOrgqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor){
|
|
C.atg_orgqr_out(ptr, out, self, input2)
|
|
}
|
|
func AtgOrmqr(ptr *Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){
|
|
cleft := *(*C.int)(unsafe.Pointer(&left))
|
|
ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
|
|
C.atg_ormqr(ptr, self, input2, input3, cleft, ctranspose)
|
|
}
|
|
func AtgOrmqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){
|
|
cleft := *(*C.int)(unsafe.Pointer(&left))
|
|
ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
|
|
C.atg_ormqr_out(ptr, out, self, input2, input3, cleft, ctranspose)
|
|
}
|
|
func AtgPairwiseDistance(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, eps float64, keepdim int32){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_pairwise_distance(ptr, x1, x2, cp, ceps, ckeepdim)
|
|
}
|
|
func AtgPdist(ptr *Ctensor, self Ctensor, p float64){
|
|
cp := *(*C.double)(unsafe.Pointer(&p))
|
|
C.atg_pdist(ptr, self, cp)
|
|
}
|
|
func AtgPermute(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){
|
|
cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))
|
|
cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))
|
|
C.atg_permute(ptr, self, cdimsDataPtr, cdimsLen)
|
|
}
|
|
func AtgPinMemory(ptr *Ctensor, self Ctensor){
|
|
C.atg_pin_memory(ptr, self)
|
|
}
|
|
func AtgPinverse(ptr *Ctensor, self Ctensor, rcond float64){
|
|
crcond := *(*C.double)(unsafe.Pointer(&rcond))
|
|
C.atg_pinverse(ptr, self, crcond)
|
|
}
|
|
func AtgPixelShuffle(ptr *Ctensor, self Ctensor, upscaleFactor int64){
|
|
cupscaleFactor := *(*C.int64_t)(unsafe.Pointer(&upscaleFactor))
|
|
C.atg_pixel_shuffle(ptr, self, cupscaleFactor)
|
|
}
|
|
func AtgPoisson(ptr *Ctensor, self Ctensor){
|
|
C.atg_poisson(ptr, self)
|
|
}
|
|
func AtgPoissonNllLoss(ptr *Ctensor, input Ctensor, target Ctensor, logInput int32, full int32, eps float64, reduction int64){
|
|
clogInput := *(*C.int)(unsafe.Pointer(&logInput))
|
|
cfull := *(*C.int)(unsafe.Pointer(&full))
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_poisson_nll_loss(ptr, input, target, clogInput, cfull, ceps, creduction)
|
|
}
|
|
func AtgPolygamma(ptr *Ctensor, n int64, self Ctensor){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
C.atg_polygamma(ptr, cn, self)
|
|
}
|
|
func AtgPolygamma_(ptr *Ctensor, self Ctensor, n int64){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
C.atg_polygamma_(ptr, self, cn)
|
|
}
|
|
func AtgPolygammaOut(ptr *Ctensor, out Ctensor, n int64, self Ctensor){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
C.atg_polygamma_out(ptr, out, cn, self)
|
|
}
|
|
func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar){
|
|
C.atg_pow(ptr, self, exponent )
|
|
}
|
|
func AtgPow1(ptr *Ctensor, self Ctensor, exponent Ctensor){
|
|
C.atg_pow1(ptr, self, exponent)
|
|
}
|
|
func AtgPow2(ptr *Ctensor, selfScalar Cscalar, exponent Ctensor){
|
|
C.atg_pow2(ptr, selfScalar , exponent)
|
|
}
|
|
func AtgPow_(ptr *Ctensor, self Ctensor, exponent Cscalar){
|
|
C.atg_pow_(ptr, self, exponent )
|
|
}
|
|
func AtgPow1_(ptr *Ctensor, self Ctensor, exponent Ctensor){
|
|
C.atg_pow_1(ptr, self, exponent)
|
|
}
|
|
func AtgPowOut(ptr *Ctensor, out Ctensor, self Ctensor, exponent Cscalar){
|
|
C.atg_pow_out(ptr, out, self, exponent )
|
|
}
|
|
func AtgPowOut1(ptr *Ctensor, out Ctensor, self Ctensor, exponent Ctensor){
|
|
C.atg_pow_out1(ptr, out, self, exponent)
|
|
}
|
|
func AtgPowOut2(ptr *Ctensor, out Ctensor, selfScalar Cscalar, exponent Ctensor){
|
|
C.atg_pow_out2(ptr, out, selfScalar , exponent)
|
|
}
|
|
func AtgPrelu(ptr *Ctensor, self Ctensor, weight Ctensor){
|
|
C.atg_prelu(ptr, self, weight)
|
|
}
|
|
func AtgPreluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor){
|
|
C.atg_prelu_backward(ptr, gradOutput, self, weight)
|
|
}
|
|
func AtgProd(ptr *Ctensor, self Ctensor, dtype int32){
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_prod(ptr, self, cdtype)
|
|
}
|
|
func AtgProd1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_prod1(ptr, self, cdim, ckeepdim, cdtype)
|
|
}
|
|
func AtgProdOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_prod_out(ptr, out, self, cdim, ckeepdim, cdtype)
|
|
}
|
|
func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumulate int32){
|
|
caccumulate := *(*C.int)(unsafe.Pointer(&accumulate))
|
|
C.atg_put_(ptr, self, index, source, caccumulate)
|
|
}
|
|
func AtgQPerChannelScales(ptr *Ctensor, self Ctensor){
|
|
C.atg_q_per_channel_scales(ptr, self)
|
|
}
|
|
func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor){
|
|
C.atg_q_per_channel_zero_points(ptr, self)
|
|
}
|
|
func AtgQr(ptr *Ctensor, self Ctensor, some int32){
|
|
csome := *(*C.int)(unsafe.Pointer(&some))
|
|
C.atg_qr(ptr, self, csome)
|
|
}
|
|
func AtgQrOut(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32){
|
|
csome := *(*C.int)(unsafe.Pointer(&some))
|
|
C.atg_qr_out(ptr, q, r, self, csome)
|
|
}
|
|
func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32){
|
|
caxis := *(*C.int64_t)(unsafe.Pointer(&axis))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_quantize_per_channel(ptr, self, scales, zeroPoints, caxis, cdtype)
|
|
}
|
|
func AtgQuantizePerTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, dtype int32){
|
|
cscale := *(*C.double)(unsafe.Pointer(&scale))
|
|
czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype)
|
|
}
|
|
func AtgQuantizedBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, vari Ctensor, eps float64, outputScale float64, outputZeroPoint int64){
|
|
ceps := *(*C.double)(unsafe.Pointer(&eps))
|
|
coutputScale := *(*C.double)(unsafe.Pointer(&outputScale))
|
|
coutputZeroPoint := *(*C.int64_t)(unsafe.Pointer(&outputZeroPoint))
|
|
C.atg_quantized_batch_norm(ptr, input, weight, bias, mean, vari, ceps, coutputScale, coutputZeroPoint)
|
|
}
|
|
func AtgQuantizedGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
|
|
C.atg_quantized_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
|
|
}
|
|
func AtgQuantizedGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
C.atg_quantized_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
|
|
}
|
|
func AtgQuantizedGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){
|
|
C.atg_quantized_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh )
|
|
}
|
|
func AtgQuantizedLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32, dtype int32, useDynamic int32){
|
|
chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))
|
|
chxLen := *(*C.int)(unsafe.Pointer(&hxLen))
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic))
|
|
C.atg_quantized_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst, cdtype, cuseDynamic)
|
|
}
|
|
func AtgQuantizedLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, dtype int32, useDynamic int32){
|
|
chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))
|
|
chxLen := *(*C.int)(unsafe.Pointer(&hxLen))
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic))
|
|
C.atg_quantized_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cdtype, cuseDynamic)
|
|
}
|
|
func AtgQuantizedLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){
|
|
chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))
|
|
chxLen := *(*C.int)(unsafe.Pointer(&hxLen))
|
|
C.atg_quantized_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh )
|
|
}
|
|
func AtgQuantizedMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode))
|
|
C.atg_quantized_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode)
|
|
}
|
|
func AtgQuantizedRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){
|
|
C.atg_quantized_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh )
|
|
}
|
|
func AtgQuantizedRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){
|
|
C.atg_quantized_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh )
|
|
}
|
|
func AtgRand(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_rand(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRandLike(ptr *Ctensor, self Ctensor){
|
|
C.atg_rand_like(ptr, self)
|
|
}
|
|
func AtgRandOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_rand_out(ptr, out, csizeDataPtr, csizeLen)
|
|
}
|
|
func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
chigh := *(*C.int64_t)(unsafe.Pointer(&high))
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRandint1(ptr *Ctensor, low int64, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
clow := *(*C.int64_t)(unsafe.Pointer(&low))
|
|
chigh := *(*C.int64_t)(unsafe.Pointer(&high))
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_randint1(ptr, clow, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRandintLike(ptr *Ctensor, self Ctensor, high int64){
|
|
chigh := *(*C.int64_t)(unsafe.Pointer(&high))
|
|
C.atg_randint_like(ptr, self, chigh)
|
|
}
|
|
func AtgRandintLike1(ptr *Ctensor, self Ctensor, low int64, high int64){
|
|
clow := *(*C.int64_t)(unsafe.Pointer(&low))
|
|
chigh := *(*C.int64_t)(unsafe.Pointer(&high))
|
|
C.atg_randint_like1(ptr, self, clow, chigh)
|
|
}
|
|
func AtgRandintOut(ptr *Ctensor, out Ctensor, high int64, sizeData []int64, sizeLen int){
|
|
chigh := *(*C.int64_t)(unsafe.Pointer(&high))
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_randint_out(ptr, out, chigh, csizeDataPtr, csizeLen)
|
|
}
|
|
func AtgRandintOut1(ptr *Ctensor, out Ctensor, low int64, high int64, sizeData []int64, sizeLen int){
|
|
clow := *(*C.int64_t)(unsafe.Pointer(&low))
|
|
chigh := *(*C.int64_t)(unsafe.Pointer(&high))
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_randint_out1(ptr, out, clow, chigh, csizeDataPtr, csizeLen)
|
|
}
|
|
func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRandnLike(ptr *Ctensor, self Ctensor){
|
|
C.atg_randn_like(ptr, self)
|
|
}
|
|
func AtgRandnOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_randn_out(ptr, out, csizeDataPtr, csizeLen)
|
|
}
|
|
func AtgRandom_(ptr *Ctensor, self Ctensor){
|
|
C.atg_random_(ptr, self)
|
|
}
|
|
func AtgRandom1_(ptr *Ctensor, self Ctensor, to int64){
|
|
cto := *(*C.int64_t)(unsafe.Pointer(&to))
|
|
C.atg_random_1(ptr, self, cto)
|
|
}
|
|
func AtgRandom2(ptr *Ctensor, self Ctensor, from int64, to int64){
|
|
cfrom := *(*C.int64_t)(unsafe.Pointer(&from))
|
|
cto := *(*C.int64_t)(unsafe.Pointer(&to))
|
|
C.atg_random_2(ptr, self, cfrom, cto)
|
|
}
|
|
func AtgRandperm(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_randperm(ptr, cn, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRandpermOut(ptr *Ctensor, out Ctensor, n int64){
|
|
cn := *(*C.int64_t)(unsafe.Pointer(&n))
|
|
C.atg_randperm_out(ptr, out, cn)
|
|
}
|
|
func AtgRange(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_range(ptr, start , end , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_range1(ptr, start , end , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgRangeOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){
|
|
C.atg_range_out(ptr, out, start , end )
|
|
}
|
|
func AtgReal(ptr *Ctensor, self Ctensor){
|
|
C.atg_real(ptr, self)
|
|
}
|
|
func AtgReciprocal(ptr *Ctensor, self Ctensor){
|
|
C.atg_reciprocal(ptr, self)
|
|
}
|
|
func AtgReciprocal_(ptr *Ctensor, self Ctensor){
|
|
C.atg_reciprocal_(ptr, self)
|
|
}
|
|
func AtgReciprocalOut(ptr *Ctensor, out Ctensor, self Ctensor){
|
|
C.atg_reciprocal_out(ptr, out, self)
|
|
}
|
|
func AtgReflectionPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReflectionPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_reflection_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgRelu(ptr *Ctensor, self Ctensor){
|
|
C.atg_relu(ptr, self)
|
|
}
|
|
func AtgRelu_(ptr *Ctensor, self Ctensor){
|
|
C.atg_relu_(ptr, self)
|
|
}
|
|
func AtgRemainder(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_remainder(ptr, self, other )
|
|
}
|
|
func AtgRemainder1(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_remainder1(ptr, self, other)
|
|
}
|
|
func AtgRemainder_(ptr *Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_remainder_(ptr, self, other )
|
|
}
|
|
func AtgRemainder1_(ptr *Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_remainder_1(ptr, self, other)
|
|
}
|
|
func AtgRemainderOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){
|
|
C.atg_remainder_out(ptr, out, self, other )
|
|
}
|
|
func AtgRemainderOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
|
|
C.atg_remainder_out1(ptr, out, self, other)
|
|
}
|
|
func AtgRenorm(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_renorm(ptr, self, p , cdim, maxnorm )
|
|
}
|
|
func AtgRenorm_(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_renorm_(ptr, self, p , cdim, maxnorm )
|
|
}
|
|
func AtgRenormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_renorm_out(ptr, out, self, p , cdim, maxnorm )
|
|
}
|
|
func AtgRepeat(ptr *Ctensor, self Ctensor, repeatsData []int64, repeatsLen int){
|
|
crepeatsDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatsData[0]))
|
|
crepeatsLen := *(*C.int)(unsafe.Pointer(&repeatsLen))
|
|
C.atg_repeat(ptr, self, crepeatsDataPtr, crepeatsLen)
|
|
}
|
|
func AtgRepeatInterleave(ptr *Ctensor, repeats Ctensor){
|
|
C.atg_repeat_interleave(ptr, repeats)
|
|
}
|
|
func AtgRepeatInterleave1(ptr *Ctensor, self Ctensor, repeats Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_repeat_interleave1(ptr, self, repeats, cdim)
|
|
}
|
|
func AtgRepeatInterleave2(ptr *Ctensor, self Ctensor, repeats int64, dim int64){
|
|
crepeats := *(*C.int64_t)(unsafe.Pointer(&repeats))
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_repeat_interleave2(ptr, self, crepeats, cdim)
|
|
}
|
|
func AtgReplicationPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad3d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad3d(ptr, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad3d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad3d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgReplicationPad3dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_replication_pad3d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgRequiresGrad_(ptr *Ctensor, self Ctensor, requiresGrad int32){
|
|
crequiresGrad := *(*C.int)(unsafe.Pointer(&requiresGrad))
|
|
C.atg_requires_grad_(ptr, self, crequiresGrad)
|
|
}
|
|
func AtgReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){
|
|
cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0]))
|
|
cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen))
|
|
C.atg_reshape(ptr, self, cshapeDataPtr, cshapeLen)
|
|
}
|
|
// AtgReshapeAs forwards its arguments unchanged to the C function atg_reshape_as.
func AtgReshapeAs(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_reshape_as(ptr, self, other)
}
|
|
func AtgResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_resize_(ptr, self, csizeDataPtr, csizeLen)
|
|
}
|
|
// AtgResizeAs_ forwards its arguments unchanged to the C function atg_resize_as_.
func AtgResizeAs_(ptr *Ctensor, self Ctensor, theTemplate Ctensor){
C.atg_resize_as_(ptr, self, theTemplate)
}
|
|
func AtgRfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32){
|
|
csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim))
|
|
cnormalized := *(*C.int)(unsafe.Pointer(&normalized))
|
|
conesided := *(*C.int)(unsafe.Pointer(&onesided))
|
|
C.atg_rfft(ptr, self, csignalNdim, cnormalized, conesided)
|
|
}
|
|
func AtgRnnRelu(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
|
|
C.atg_rnn_relu(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
|
|
}
|
|
func AtgRnnRelu1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
C.atg_rnn_relu1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
|
|
}
|
|
// AtgRnnReluCell forwards its arguments unchanged to the C function atg_rnn_relu_cell.
func AtgRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
C.atg_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
}
|
|
func AtgRnnTanh(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
|
|
C.atg_rnn_tanh(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
|
|
}
|
|
func AtgRnnTanh1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
|
|
cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0]))
|
|
cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen))
|
|
chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
|
|
cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
|
|
cdropout := *(*C.double)(unsafe.Pointer(&dropout))
|
|
ctrain := *(*C.int)(unsafe.Pointer(&train))
|
|
cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
|
|
C.atg_rnn_tanh1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
|
|
}
|
|
// AtgRnnTanhCell forwards its arguments unchanged to the C function atg_rnn_tanh_cell.
func AtgRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
C.atg_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
}
|
|
func AtgRoll(ptr *Ctensor, self Ctensor, shiftsData []int64, shiftsLen int, dimsData []int64, dimsLen int){
|
|
cshiftsDataPtr := (*C.int64_t)(unsafe.Pointer(&shiftsData[0]))
|
|
cshiftsLen := *(*C.int)(unsafe.Pointer(&shiftsLen))
|
|
cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))
|
|
cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))
|
|
C.atg_roll(ptr, self, cshiftsDataPtr, cshiftsLen, cdimsDataPtr, cdimsLen)
|
|
}
|
|
func AtgRot90(ptr *Ctensor, self Ctensor, k int64, dimsData []int64, dimsLen int){
|
|
ck := *(*C.int64_t)(unsafe.Pointer(&k))
|
|
cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))
|
|
cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))
|
|
C.atg_rot90(ptr, self, ck, cdimsDataPtr, cdimsLen)
|
|
}
|
|
// AtgRound forwards its arguments unchanged to the C function atg_round.
func AtgRound(ptr *Ctensor, self Ctensor){
C.atg_round(ptr, self)
}
|
|
// AtgRound_ forwards its arguments unchanged to the C function atg_round_ (in-place variant).
func AtgRound_(ptr *Ctensor, self Ctensor){
C.atg_round_(ptr, self)
}
|
|
// AtgRoundOut forwards its arguments unchanged to the C function atg_round_out.
func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_round_out(ptr, out, self)
}
|
|
func AtgRrelu(ptr *Ctensor, self Ctensor, training int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
C.atg_rrelu(ptr, self, ctraining)
|
|
}
|
|
func AtgRrelu_(ptr *Ctensor, self Ctensor, training int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
C.atg_rrelu_(ptr, self, ctraining)
|
|
}
|
|
func AtgRreluWithNoise(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
C.atg_rrelu_with_noise(ptr, self, noise, ctraining)
|
|
}
|
|
func AtgRreluWithNoise_(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
C.atg_rrelu_with_noise_(ptr, self, noise, ctraining)
|
|
}
|
|
func AtgRreluWithNoiseBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, noise Ctensor, lower Cscalar, upper Cscalar, training int32, selfIsResult int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult))
|
|
C.atg_rrelu_with_noise_backward(ptr, gradOutput, self, noise, lower , upper , ctraining, cselfIsResult)
|
|
}
|
|
func AtgRreluWithNoiseOut(ptr *Ctensor, out Ctensor, self Ctensor, noise Ctensor, training int32){
|
|
ctraining := *(*C.int)(unsafe.Pointer(&training))
|
|
C.atg_rrelu_with_noise_out(ptr, out, self, noise, ctraining)
|
|
}
|
|
// AtgRsqrt forwards its arguments unchanged to the C function atg_rsqrt.
func AtgRsqrt(ptr *Ctensor, self Ctensor){
C.atg_rsqrt(ptr, self)
}
|
|
// AtgRsqrt_ forwards its arguments unchanged to the C function atg_rsqrt_ (in-place variant).
func AtgRsqrt_(ptr *Ctensor, self Ctensor){
C.atg_rsqrt_(ptr, self)
}
|
|
// AtgRsqrtOut forwards its arguments unchanged to the C function atg_rsqrt_out.
func AtgRsqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_rsqrt_out(ptr, out, self)
}
|
|
// AtgRsub forwards its arguments unchanged to the C function atg_rsub (tensor variant).
func AtgRsub(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_rsub(ptr, self, other)
}
|
|
// AtgRsub1 forwards its arguments unchanged to the C function atg_rsub1 (scalar variant).
func AtgRsub1(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg_rsub1(ptr, self, other )
}
|
|
func AtgScalarTensor(ptr *Ctensor, s Cscalar, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_scalar_tensor(ptr, s , coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgScatter(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_scatter(ptr, self, cdim, index, src)
|
|
}
|
|
func AtgScatter1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_scatter1(ptr, self, cdim, index, value )
|
|
}
|
|
func AtgScatter_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_scatter_(ptr, self, cdim, index, src)
|
|
}
|
|
func AtgScatter1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_scatter_1(ptr, self, cdim, index, value )
|
|
}
|
|
func AtgScatterAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_scatter_add(ptr, self, cdim, index, src)
|
|
}
|
|
func AtgScatterAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_scatter_add_(ptr, self, cdim, index, src)
|
|
}
|
|
func AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cindex := *(*C.int64_t)(unsafe.Pointer(&index))
|
|
C.atg_select(ptr, self, cdim, cindex)
|
|
}
|
|
// AtgSelu forwards its arguments unchanged to the C function atg_selu.
func AtgSelu(ptr *Ctensor, self Ctensor){
C.atg_selu(ptr, self)
}
|
|
// AtgSelu_ forwards its arguments unchanged to the C function atg_selu_ (in-place variant).
func AtgSelu_(ptr *Ctensor, self Ctensor){
C.atg_selu_(ptr, self)
}
|
|
// AtgSet_ forwards its arguments unchanged to the C function atg_set_.
func AtgSet_(ptr *Ctensor, self Ctensor){
C.atg_set_(ptr, self)
}
|
|
// AtgSet1_ forwards its arguments unchanged to the C function atg_set_1.
func AtgSet1_(ptr *Ctensor, self Ctensor, source Ctensor){
C.atg_set_1(ptr, self, source)
}
|
|
func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int32){
|
|
cr := *(*C.int)(unsafe.Pointer(&r))
|
|
C.atg_set_requires_grad(ptr, self, cr)
|
|
}
|
|
// AtgSigmoid forwards its arguments unchanged to the C function atg_sigmoid.
func AtgSigmoid(ptr *Ctensor, self Ctensor){
C.atg_sigmoid(ptr, self)
}
|
|
// AtgSigmoid_ forwards its arguments unchanged to the C function atg_sigmoid_ (in-place variant).
func AtgSigmoid_(ptr *Ctensor, self Ctensor){
C.atg_sigmoid_(ptr, self)
}
|
|
// AtgSigmoidBackward forwards its arguments unchanged to the C function atg_sigmoid_backward.
func AtgSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){
C.atg_sigmoid_backward(ptr, gradOutput, output)
}
|
|
// AtgSigmoidBackwardOut forwards its arguments unchanged to the C function atg_sigmoid_backward_out.
func AtgSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){
C.atg_sigmoid_backward_out(ptr, gradInput, gradOutput, output)
}
|
|
// AtgSigmoidOut forwards its arguments unchanged to the C function atg_sigmoid_out.
func AtgSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_sigmoid_out(ptr, out, self)
}
|
|
// AtgSign forwards its arguments unchanged to the C function atg_sign.
func AtgSign(ptr *Ctensor, self Ctensor){
C.atg_sign(ptr, self)
}
|
|
// AtgSign_ forwards its arguments unchanged to the C function atg_sign_ (in-place variant).
func AtgSign_(ptr *Ctensor, self Ctensor){
C.atg_sign_(ptr, self)
}
|
|
// AtgSignOut forwards its arguments unchanged to the C function atg_sign_out.
func AtgSignOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_sign_out(ptr, out, self)
}
|
|
// AtgSin forwards its arguments unchanged to the C function atg_sin.
func AtgSin(ptr *Ctensor, self Ctensor){
C.atg_sin(ptr, self)
}
|
|
// AtgSin_ forwards its arguments unchanged to the C function atg_sin_ (in-place variant).
func AtgSin_(ptr *Ctensor, self Ctensor){
C.atg_sin_(ptr, self)
}
|
|
// AtgSinOut forwards its arguments unchanged to the C function atg_sin_out.
func AtgSinOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_sin_out(ptr, out, self)
}
|
|
// AtgSinh forwards its arguments unchanged to the C function atg_sinh.
func AtgSinh(ptr *Ctensor, self Ctensor){
C.atg_sinh(ptr, self)
}
|
|
// AtgSinh_ forwards its arguments unchanged to the C function atg_sinh_ (in-place variant).
func AtgSinh_(ptr *Ctensor, self Ctensor){
C.atg_sinh_(ptr, self)
}
|
|
// AtgSinhOut forwards its arguments unchanged to the C function atg_sinh_out.
func AtgSinhOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_sinh_out(ptr, out, self)
}
|
|
func AtgSlice(ptr *Ctensor, self Ctensor, dim int64, start int64, end int64, step int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cstart := *(*C.int64_t)(unsafe.Pointer(&start))
|
|
cend := *(*C.int64_t)(unsafe.Pointer(&end))
|
|
cstep := *(*C.int64_t)(unsafe.Pointer(&step))
|
|
C.atg_slice(ptr, self, cdim, cstart, cend, cstep)
|
|
}
|
|
// AtgSlogdet forwards its arguments unchanged to the C function atg_slogdet.
func AtgSlogdet(ptr *Ctensor, self Ctensor){
C.atg_slogdet(ptr, self)
}
|
|
func AtgSlowConv3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_slow_conv3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgSlowConv3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
C.atg_slow_conv3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen)
|
|
}
|
|
func AtgSlowConvDilated2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_slow_conv_dilated2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgSlowConvDilated3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_slow_conv_dilated3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgSlowConvTranspose2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_slow_conv_transpose2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgSlowConvTranspose2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_slow_conv_transpose2d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgSlowConvTranspose3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_slow_conv_transpose3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen)
|
|
}
|
|
func AtgSlowConvTranspose3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){
|
|
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0]))
|
|
ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen))
|
|
cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
|
|
cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
|
|
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
|
|
cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
|
|
coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
|
|
coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
|
|
cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
|
|
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
|
|
C.atg_slow_conv_transpose3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen)
|
|
}
|
|
// AtgSmm forwards its arguments unchanged to the C function atg_smm.
func AtgSmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){
C.atg_smm(ptr, self, mat2)
}
|
|
func AtgSmoothL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_smooth_l1_loss(ptr, self, target, creduction)
|
|
}
|
|
func AtgSmoothL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_smooth_l1_loss_backward(ptr, gradOutput, self, target, creduction)
|
|
}
|
|
func AtgSmoothL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_smooth_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction)
|
|
}
|
|
func AtgSmoothL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_smooth_l1_loss_out(ptr, out, self, target, creduction)
|
|
}
|
|
func AtgSoftMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_soft_margin_loss(ptr, self, target, creduction)
|
|
}
|
|
func AtgSoftMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_soft_margin_loss_backward(ptr, gradOutput, self, target, creduction)
|
|
}
|
|
func AtgSoftMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_soft_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction)
|
|
}
|
|
func AtgSoftMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){
|
|
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
|
|
C.atg_soft_margin_loss_out(ptr, out, self, target, creduction)
|
|
}
|
|
func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
|
|
C.atg_softmax(ptr, self, cdim, cdtype)
|
|
}
|
|
// AtgSoftplus forwards its arguments unchanged to the C function atg_softplus.
func AtgSoftplus(ptr *Ctensor, self Ctensor){
C.atg_softplus(ptr, self)
}
|
|
// AtgSoftplusBackward forwards its arguments unchanged to the C function atg_softplus_backward.
func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){
C.atg_softplus_backward(ptr, gradOutput, self, beta , threshold , output)
}
|
|
// AtgSoftplusBackwardOut forwards its arguments unchanged to the C function atg_softplus_backward_out.
func AtgSoftplusBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){
C.atg_softplus_backward_out(ptr, gradInput, gradOutput, self, beta , threshold , output)
}
|
|
// AtgSoftplusOut forwards its arguments unchanged to the C function atg_softplus_out.
func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_softplus_out(ptr, out, self)
}
|
|
// AtgSoftshrink forwards its arguments unchanged to the C function atg_softshrink.
func AtgSoftshrink(ptr *Ctensor, self Ctensor){
C.atg_softshrink(ptr, self)
}
|
|
// AtgSoftshrinkBackward forwards its arguments unchanged to the C function atg_softshrink_backward.
func AtgSoftshrinkBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){
C.atg_softshrink_backward(ptr, gradOutput, self, lambd )
}
|
|
// AtgSoftshrinkBackwardOut forwards its arguments unchanged to the C function atg_softshrink_backward_out.
func AtgSoftshrinkBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){
C.atg_softshrink_backward_out(ptr, gradInput, gradOutput, self, lambd )
}
|
|
// AtgSoftshrinkOut forwards its arguments unchanged to the C function atg_softshrink_out.
func AtgSoftshrinkOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_softshrink_out(ptr, out, self)
}
|
|
// AtgSolve forwards its arguments unchanged to the C function atg_solve.
func AtgSolve(ptr *Ctensor, self Ctensor, a Ctensor){
C.atg_solve(ptr, self, a)
}
|
|
// AtgSolveOut forwards its arguments unchanged to the C function atg_solve_out.
func AtgSolveOut(ptr *Ctensor, solution Ctensor, lu Ctensor, self Ctensor, a Ctensor){
C.atg_solve_out(ptr, solution, lu, self, a)
}
|
|
func AtgSort(ptr *Ctensor, self Ctensor, dim int64, descending int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cdescending := *(*C.int)(unsafe.Pointer(&descending))
|
|
C.atg_sort(ptr, self, cdim, cdescending)
|
|
}
|
|
func AtgSortOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, descending int32){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
cdescending := *(*C.int)(unsafe.Pointer(&descending))
|
|
C.atg_sort_out(ptr, values, indices, self, cdim, cdescending)
|
|
}
|
|
func AtgSparseCooTensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_sparse_coo_tensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgSparseCooTensor1(ptr *Ctensor, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_sparse_coo_tensor1(ptr, indices, values, coptionsKind, coptionsDevice)
|
|
}
|
|
func AtgSparseCooTensor2(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_sparse_coo_tensor2(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
// AtgSparseMask forwards its arguments unchanged to the C function atg_sparse_mask.
func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor){
C.atg_sparse_mask(ptr, self, mask)
}
|
|
func AtgSparseResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
|
|
cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim))
|
|
C.atg_sparse_resize_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim)
|
|
}
|
|
func AtgSparseResizeAndClear_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
|
|
cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim))
|
|
C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim)
|
|
}
|
|
|
|
|
|
// AtgSqrt forwards its arguments unchanged to the C function atg_sqrt.
func AtgSqrt(ptr *Ctensor, self Ctensor){
C.atg_sqrt(ptr, self)
}
|
|
// AtgSqrt_ forwards its arguments unchanged to the C function atg_sqrt_ (in-place variant).
func AtgSqrt_(ptr *Ctensor, self Ctensor){
C.atg_sqrt_(ptr, self)
}
|
|
// AtgSqrtOut forwards its arguments unchanged to the C function atg_sqrt_out.
func AtgSqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_sqrt_out(ptr, out, self)
}
|
|
// AtgSquare forwards its arguments unchanged to the C function atg_square.
func AtgSquare(ptr *Ctensor, self Ctensor){
C.atg_square(ptr, self)
}
|
|
// AtgSquare_ forwards its arguments unchanged to the C function atg_square_ (in-place variant).
func AtgSquare_(ptr *Ctensor, self Ctensor){
C.atg_square_(ptr, self)
}
|
|
// AtgSqueeze forwards its arguments unchanged to the C function atg_squeeze.
func AtgSqueeze(ptr *Ctensor, self Ctensor){
C.atg_squeeze(ptr, self)
}
|
|
func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_squeeze1(ptr, self, cdim)
|
|
}
|
|
// AtgSqueeze_ forwards its arguments unchanged to the C function atg_squeeze_ (in-place variant).
func AtgSqueeze_(ptr *Ctensor, self Ctensor){
C.atg_squeeze_(ptr, self)
}
|
|
func AtgSqueeze1_(ptr *Ctensor, self Ctensor, dim int64){
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_squeeze_1(ptr, self, cdim)
|
|
}
|
|
// AtgSspaddmm forwards its arguments unchanged to the C function atg_sspaddmm.
func AtgSspaddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){
C.atg_sspaddmm(ptr, self, mat1, mat2)
}
|
|
// AtgSspaddmmOut forwards its arguments unchanged to the C function atg_sspaddmm_out.
func AtgSspaddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){
C.atg_sspaddmm_out(ptr, out, self, mat1, mat2)
}
|
|
func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
|
|
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
|
|
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_stack(ptr, ctensorsDataPtr, ctensorsLen, cdim)
|
|
}
|
|
func AtgStackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
|
|
ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
|
|
ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
|
|
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
|
|
C.atg_stack_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim)
|
|
}
|
|
func AtgStd(ptr *Ctensor, self Ctensor, unbiased int32){
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
C.atg_std(ptr, self, cunbiased)
|
|
}
|
|
func AtgStd1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_std1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
|
|
}
|
|
func AtgStdMean(ptr *Ctensor, self Ctensor, unbiased int32){
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
C.atg_std_mean(ptr, self, cunbiased)
|
|
}
|
|
func AtgStdMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_std_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
|
|
}
|
|
func AtgStdOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_std_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
|
|
}
|
|
func AtgStft(ptr *Ctensor, self Ctensor, nFft int64, hopLength int64, winLength int64, window Ctensor, normalized int32, onesided int32){
|
|
cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft))
|
|
chopLength := *(*C.int64_t)(unsafe.Pointer(&hopLength))
|
|
cwinLength := *(*C.int64_t)(unsafe.Pointer(&winLength))
|
|
cnormalized := *(*C.int)(unsafe.Pointer(&normalized))
|
|
conesided := *(*C.int)(unsafe.Pointer(&onesided))
|
|
C.atg_stft(ptr, self, cnFft, chopLength, cwinLength, window, cnormalized, conesided)
|
|
}
|
|
// AtgSub forwards its arguments unchanged to the C function atg_sub (tensor variant).
func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_sub(ptr, self, other)
}
|
|
// AtgSub1 forwards its arguments unchanged to the C function atg_sub1 (scalar variant).
func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg_sub1(ptr, self, other )
}
|
|
// AtgSub_ forwards its arguments unchanged to the C function atg_sub_ (in-place tensor variant).
func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_sub_(ptr, self, other)
}
|
|
// AtgSub1_ forwards its arguments unchanged to the C function atg_sub_1 (in-place scalar variant).
func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg_sub_1(ptr, self, other )
}
|
|
// AtgSubOut wraps the C function atg_sub_out, writing into the pre-allocated out tensor.
func AtgSubOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
C.atg_sub_out(ptr, out, self, other)
}
|
|
// AtgSum wraps the C function atg_sum; dtype is bit-reinterpreted to C int.
func AtgSum(ptr *Ctensor, self Ctensor, dtype int32){
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
C.atg_sum(ptr, self, cdtype)
}
|
|
// AtgSum1 wraps the C function atg_sum1; dimData is passed as pointer-to-first-element plus dimLen.
func AtgSum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
C.atg_sum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype)
}
|
|
// AtgSumOut wraps the C function atg_sum_out, writing into the pre-allocated out tensor.
func AtgSumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
C.atg_sum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype)
}
|
|
// AtgSumToSize wraps the C function atg_sum_to_size; sizeData is passed as pointer-to-first-element plus sizeLen.
func AtgSumToSize(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
C.atg_sum_to_size(ptr, self, csizeDataPtr, csizeLen)
}
|
|
// AtgSvd wraps the C function atg_svd; the flag args are bit-reinterpreted to C int.
func AtgSvd(ptr *Ctensor, self Ctensor, some int32, computeUv int32){
csome := *(*C.int)(unsafe.Pointer(&some))
ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv))
C.atg_svd(ptr, self, csome, ccomputeUv)
}
|
|
// AtgSvdOut wraps the C function atg_svd_out, writing into the pre-allocated u, s and v tensors.
func AtgSvdOut(ptr *Ctensor, u Ctensor, s Ctensor, v Ctensor, self Ctensor, some int32, computeUv int32){
csome := *(*C.int)(unsafe.Pointer(&some))
ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv))
C.atg_svd_out(ptr, u, s, v, self, csome, ccomputeUv)
}
|
|
// AtgSymeig wraps the C function atg_symeig; the flag args are bit-reinterpreted to C int.
func AtgSymeig(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){
ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors))
cupper := *(*C.int)(unsafe.Pointer(&upper))
C.atg_symeig(ptr, self, ceigenvectors, cupper)
}
|
|
// AtgSymeigOut wraps the C function atg_symeig_out, writing into the pre-allocated e and v tensors.
func AtgSymeigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32, upper int32){
ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors))
cupper := *(*C.int)(unsafe.Pointer(&upper))
C.atg_symeig_out(ptr, e, v, self, ceigenvectors, cupper)
}
|
|
// AtgT wraps the C function atg_t.
func AtgT(ptr *Ctensor, self Ctensor){
C.atg_t(ptr, self)
}
|
|
// AtgT_ wraps the C function atg_t_ (in-place variant).
func AtgT_(ptr *Ctensor, self Ctensor){
C.atg_t_(ptr, self)
}
|
|
// AtgTake wraps the C function atg_take.
func AtgTake(ptr *Ctensor, self Ctensor, index Ctensor){
C.atg_take(ptr, self, index)
}
|
|
// AtgTakeOut wraps the C function atg_take_out, writing into the pre-allocated out tensor.
func AtgTakeOut(ptr *Ctensor, out Ctensor, self Ctensor, index Ctensor){
C.atg_take_out(ptr, out, self, index)
}
|
|
// AtgTan wraps the C function atg_tan.
func AtgTan(ptr *Ctensor, self Ctensor){
C.atg_tan(ptr, self)
}
|
|
// AtgTan_ wraps the C function atg_tan_ (in-place variant).
func AtgTan_(ptr *Ctensor, self Ctensor){
C.atg_tan_(ptr, self)
}
|
|
// AtgTanOut wraps the C function atg_tan_out, writing into the pre-allocated out tensor.
func AtgTanOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_tan_out(ptr, out, self)
}
|
|
// AtgTanh wraps the C function atg_tanh.
func AtgTanh(ptr *Ctensor, self Ctensor){
C.atg_tanh(ptr, self)
}
|
|
// AtgTanh_ wraps the C function atg_tanh_ (in-place variant).
func AtgTanh_(ptr *Ctensor, self Ctensor){
C.atg_tanh_(ptr, self)
}
|
|
// AtgTanhBackward wraps the C function atg_tanh_backward.
func AtgTanhBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){
C.atg_tanh_backward(ptr, gradOutput, output)
}
|
|
// AtgTanhBackwardOut wraps the C function atg_tanh_backward_out, writing into gradInput.
func AtgTanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){
C.atg_tanh_backward_out(ptr, gradInput, gradOutput, output)
}
|
|
// AtgTanhOut wraps the C function atg_tanh_out, writing into the pre-allocated out tensor.
func AtgTanhOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_tanh_out(ptr, out, self)
}
|
|
// AtgTensordot wraps the C function atg_tensordot; both dims slices are passed as
// pointer-to-first-element plus length.
func AtgTensordot(ptr *Ctensor, self Ctensor, other Ctensor, dimsSelfData []int64, dimsSelfLen int, dimsOtherData []int64, dimsOtherLen int){
cdimsSelfDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsSelfData[0]))
cdimsSelfLen := *(*C.int)(unsafe.Pointer(&dimsSelfLen))
cdimsOtherDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsOtherData[0]))
cdimsOtherLen := *(*C.int)(unsafe.Pointer(&dimsOtherLen))
C.atg_tensordot(ptr, self, other, cdimsSelfDataPtr, cdimsSelfLen, cdimsOtherDataPtr, cdimsOtherLen)
}
|
|
// AtgThreshold wraps the C function atg_threshold.
func AtgThreshold(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){
C.atg_threshold(ptr, self, threshold , value )
}
|
|
// AtgThreshold_ wraps the C function atg_threshold_ (in-place variant).
func AtgThreshold_(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){
C.atg_threshold_(ptr, self, threshold , value )
}
|
|
// AtgThresholdBackward wraps the C function atg_threshold_backward.
func AtgThresholdBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, threshold Cscalar){
C.atg_threshold_backward(ptr, gradOutput, self, threshold )
}
|
|
// AtgThresholdOut wraps the C function atg_threshold_out, writing into the pre-allocated out tensor.
func AtgThresholdOut(ptr *Ctensor, out Ctensor, self Ctensor, threshold Cscalar, value Cscalar){
C.atg_threshold_out(ptr, out, self, threshold , value )
}
|
|
// AtgTo wraps the C function atg_to; device is bit-reinterpreted to C int.
func AtgTo(ptr *Ctensor, self Ctensor, device int32){
cdevice := *(*C.int)(unsafe.Pointer(&device))
C.atg_to(ptr, self, cdevice)
}
|
|
// AtgTo1 wraps the C function atg_to1; kind/device/flags are bit-reinterpreted to C int.
// (The `&copy` below appeared HTML-entity-mangled as `©` in a previous rendering; restored.)
func AtgTo1(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32, copy int32){
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
ccopy := *(*C.int)(unsafe.Pointer(&copy))
C.atg_to1(ptr, self, coptionsKind, coptionsDevice, cnonBlocking, ccopy)
}
|
|
// AtgTo2 wraps the C function atg_to2; dtype and flags are bit-reinterpreted to C int.
// (The `&copy` below appeared HTML-entity-mangled as `©` in a previous rendering; restored.)
func AtgTo2(ptr *Ctensor, self Ctensor, dtype int32, nonBlocking int32, copy int32){
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
ccopy := *(*C.int)(unsafe.Pointer(&copy))
C.atg_to2(ptr, self, cdtype, cnonBlocking, ccopy)
}
|
|
// AtgTo3 wraps the C function atg_to3; flags are bit-reinterpreted to C int.
// (The `&copy` below appeared HTML-entity-mangled as `©` in a previous rendering; restored.)
func AtgTo3(ptr *Ctensor, self Ctensor, other Ctensor, nonBlocking int32, copy int32){
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
ccopy := *(*C.int)(unsafe.Pointer(&copy))
C.atg_to3(ptr, self, other, cnonBlocking, ccopy)
}
|
|
// AtgTo4 wraps the C function atg_to4; device/dtype/flags are bit-reinterpreted to C int.
// (The `&copy` below appeared HTML-entity-mangled as `©` in a previous rendering; restored.)
func AtgTo4(ptr *Ctensor, self Ctensor, device int32, dtype int32, nonBlocking int32, copy int32){
cdevice := *(*C.int)(unsafe.Pointer(&device))
cdtype := *(*C.int)(unsafe.Pointer(&dtype))
cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
ccopy := *(*C.int)(unsafe.Pointer(&copy))
C.atg_to4(ptr, self, cdevice, cdtype, cnonBlocking, ccopy)
}
|
|
// AtgToDense wraps the C function atg_to_dense.
func AtgToDense(ptr *Ctensor, self Ctensor){
C.atg_to_dense(ptr, self)
}
|
|
// AtgToDenseBackward wraps the C function atg_to_dense_backward.
func AtgToDenseBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
C.atg_to_dense_backward(ptr, grad, input)
}
|
|
// AtgToMkldnn wraps the C function atg_to_mkldnn.
func AtgToMkldnn(ptr *Ctensor, self Ctensor){
C.atg_to_mkldnn(ptr, self)
}
|
|
// AtgToMkldnnBackward wraps the C function atg_to_mkldnn_backward.
func AtgToMkldnnBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
C.atg_to_mkldnn_backward(ptr, grad, input)
}
|
|
// AtgToSparse wraps the C function atg_to_sparse.
func AtgToSparse(ptr *Ctensor, self Ctensor){
C.atg_to_sparse(ptr, self)
}
|
|
// AtgToSparse1 wraps the C function atg_to_sparse1; sparseDim is bit-reinterpreted to C int64_t.
func AtgToSparse1(ptr *Ctensor, self Ctensor, sparseDim int64){
csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
C.atg_to_sparse1(ptr, self, csparseDim)
}
|
|
// AtgTopk wraps the C function atg_topk; k/dim are passed as C int64_t and the flags as C int.
func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
ck := *(*C.int64_t)(unsafe.Pointer(&k))
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
clargest := *(*C.int)(unsafe.Pointer(&largest))
csorted := *(*C.int)(unsafe.Pointer(&sorted))
C.atg_topk(ptr, self, ck, cdim, clargest, csorted)
}
|
|
// AtgTopkOut wraps the C function atg_topk_out, writing into the pre-allocated values/indices tensors.
func AtgTopkOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
ck := *(*C.int64_t)(unsafe.Pointer(&k))
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
clargest := *(*C.int)(unsafe.Pointer(&largest))
csorted := *(*C.int)(unsafe.Pointer(&sorted))
C.atg_topk_out(ptr, values, indices, self, ck, cdim, clargest, csorted)
}
|
|
// AtgTotype wraps the C function atg_totype; scalarType is bit-reinterpreted to C int.
func AtgTotype(ptr *Ctensor, self Ctensor, scalarType int32){
cscalarType := *(*C.int)(unsafe.Pointer(&scalarType))
C.atg_totype(ptr, self, cscalarType)
}
|
|
// AtgTrace wraps the C function atg_trace.
func AtgTrace(ptr *Ctensor, self Ctensor){
C.atg_trace(ptr, self)
}
|
|
// AtgTranspose wraps the C function atg_transpose; dim0/dim1 are bit-reinterpreted to C int64_t.
func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
C.atg_transpose(ptr, self, cdim0, cdim1)
}
|
|
// AtgTranspose_ wraps the C function atg_transpose_ (in-place variant).
func AtgTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
C.atg_transpose_(ptr, self, cdim0, cdim1)
}
|
|
// AtgTrapz wraps the C function atg_trapz (tensor-spacing variant).
func AtgTrapz(ptr *Ctensor, y Ctensor, x Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg_trapz(ptr, y, x, cdim)
}
|
|
// AtgTrapz1 wraps the C function atg_trapz1; dx is bit-reinterpreted to C double.
func AtgTrapz1(ptr *Ctensor, y Ctensor, dx float64, dim int64){
cdx := *(*C.double)(unsafe.Pointer(&dx))
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg_trapz1(ptr, y, cdx, cdim)
}
|
|
// AtgTriangularSolve wraps the C function atg_triangular_solve; flags are bit-reinterpreted to C int.
func AtgTriangularSolve(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){
cupper := *(*C.int)(unsafe.Pointer(&upper))
ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular))
C.atg_triangular_solve(ptr, self, a, cupper, ctranspose, cunitriangular)
}
|
|
// AtgTriangularSolveOut wraps the C function atg_triangular_solve_out, writing into x and m.
func AtgTriangularSolveOut(ptr *Ctensor, x Ctensor, m Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){
cupper := *(*C.int)(unsafe.Pointer(&upper))
ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular))
C.atg_triangular_solve_out(ptr, x, m, self, a, cupper, ctranspose, cunitriangular)
}
|
|
// AtgTril wraps the C function atg_tril; diagonal is bit-reinterpreted to C int64_t.
func AtgTril(ptr *Ctensor, self Ctensor, diagonal int64){
cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))
C.atg_tril(ptr, self, cdiagonal)
}
|
|
// AtgTril_ wraps the C function atg_tril_ (in-place variant).
func AtgTril_(ptr *Ctensor, self Ctensor, diagonal int64){
cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))
C.atg_tril_(ptr, self, cdiagonal)
}
|
|
// AtgTrilIndices wraps the C function atg_tril_indices; row/col/offset are passed
// as C int64_t and the option codes as C int.
func AtgTrilIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){
crow := *(*C.int64_t)(unsafe.Pointer(&row))
ccol := *(*C.int64_t)(unsafe.Pointer(&col))
coffset := *(*C.int64_t)(unsafe.Pointer(&offset))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg_tril_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice)
}
|
|
// AtgTrilOut wraps the C function atg_tril_out, writing into the pre-allocated out tensor.
func AtgTrilOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){
cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))
C.atg_tril_out(ptr, out, self, cdiagonal)
}
|
|
// AtgTripletMarginLoss wraps the C function atg_triplet_margin_loss; float args are
// passed as C double, swap as C int and reduction as C int64_t.
func AtgTripletMarginLoss(ptr *Ctensor, anchor Ctensor, positive Ctensor, negative Ctensor, margin float64, p float64, eps float64, swap int32, reduction int64){
cmargin := *(*C.double)(unsafe.Pointer(&margin))
cp := *(*C.double)(unsafe.Pointer(&p))
ceps := *(*C.double)(unsafe.Pointer(&eps))
cswap := *(*C.int)(unsafe.Pointer(&swap))
creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
C.atg_triplet_margin_loss(ptr, anchor, positive, negative, cmargin, cp, ceps, cswap, creduction)
}
|
|
// AtgTriu wraps the C function atg_triu; diagonal is bit-reinterpreted to C int64_t.
func AtgTriu(ptr *Ctensor, self Ctensor, diagonal int64){
cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))
C.atg_triu(ptr, self, cdiagonal)
}
|
|
// AtgTriu_ wraps the C function atg_triu_ (in-place variant).
func AtgTriu_(ptr *Ctensor, self Ctensor, diagonal int64){
cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))
C.atg_triu_(ptr, self, cdiagonal)
}
|
|
// AtgTriuIndices wraps the C function atg_triu_indices; row/col/offset are passed
// as C int64_t and the option codes as C int.
func AtgTriuIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){
crow := *(*C.int64_t)(unsafe.Pointer(&row))
ccol := *(*C.int64_t)(unsafe.Pointer(&col))
coffset := *(*C.int64_t)(unsafe.Pointer(&offset))
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
C.atg_triu_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice)
}
|
|
// AtgTriuOut wraps the C function atg_triu_out, writing into the pre-allocated out tensor.
func AtgTriuOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){
cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal))
C.atg_triu_out(ptr, out, self, cdiagonal)
}
|
|
// AtgTrueDivide wraps the C function atg_true_divide (tensor-tensor variant).
func AtgTrueDivide(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_true_divide(ptr, self, other)
}
|
|
// AtgTrueDivide1 wraps the C function atg_true_divide1 (tensor-scalar variant).
func AtgTrueDivide1(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg_true_divide1(ptr, self, other )
}
|
|
// AtgTrueDivide_ wraps the C function atg_true_divide_ (in-place, tensor-tensor variant).
func AtgTrueDivide_(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_true_divide_(ptr, self, other)
}
|
|
// AtgTrueDivide1_ wraps the C function atg_true_divide_1 (in-place, tensor-scalar variant).
func AtgTrueDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){
C.atg_true_divide_1(ptr, self, other )
}
|
|
// AtgTrueDivideOut wraps the C function atg_true_divide_out, writing into the pre-allocated out tensor.
func AtgTrueDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
C.atg_true_divide_out(ptr, out, self, other)
}
|
|
// AtgTrunc wraps the C function atg_trunc.
func AtgTrunc(ptr *Ctensor, self Ctensor){
C.atg_trunc(ptr, self)
}
|
|
// AtgTrunc_ wraps the C function atg_trunc_ (in-place variant).
func AtgTrunc_(ptr *Ctensor, self Ctensor){
C.atg_trunc_(ptr, self)
}
|
|
// AtgTruncOut wraps the C function atg_trunc_out, writing into the pre-allocated out tensor.
func AtgTruncOut(ptr *Ctensor, out Ctensor, self Ctensor){
C.atg_trunc_out(ptr, out, self)
}
|
|
// AtgTypeAs wraps the C function atg_type_as.
func AtgTypeAs(ptr *Ctensor, self Ctensor, other Ctensor){
C.atg_type_as(ptr, self, other)
}
|
|
|
|
// AtgUnfold wraps the C function atg_unfold; int64 args are bit-reinterpreted to C int64_t.
func AtgUnfold(ptr *Ctensor, self Ctensor, dimension int64, size int64, step int64){
cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension))
csize := *(*C.int64_t)(unsafe.Pointer(&size))
cstep := *(*C.int64_t)(unsafe.Pointer(&step))
C.atg_unfold(ptr, self, cdimension, csize, cstep)
}
|
|
// AtgUniform_ wraps the C function atg_uniform_ (in-place); from/to are passed as C double.
func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64){
cfrom := *(*C.double)(unsafe.Pointer(&from))
cto := *(*C.double)(unsafe.Pointer(&to))
C.atg_uniform_(ptr, self, cfrom, cto)
}
|
|
// AtgUniqueConsecutive wraps the C function atg_unique_consecutive; flags as C int, dim as C int64_t.
func AtgUniqueConsecutive(ptr *Ctensor, self Ctensor, returnInverse int32, returnCounts int32, dim int64){
creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts))
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg_unique_consecutive(ptr, self, creturnInverse, creturnCounts, cdim)
}
|
|
// AtgUniqueDim wraps the C function atg_unique_dim; dim as C int64_t, flags as C int.
func AtgUniqueDim(ptr *Ctensor, self Ctensor, dim int64, sorted int32, returnInverse int32, returnCounts int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
csorted := *(*C.int)(unsafe.Pointer(&sorted))
creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts))
C.atg_unique_dim(ptr, self, cdim, csorted, creturnInverse, creturnCounts)
}
|
|
// AtgUniqueDimConsecutive wraps the C function atg_unique_dim_consecutive.
func AtgUniqueDimConsecutive(ptr *Ctensor, self Ctensor, dim int64, returnInverse int32, returnCounts int32){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts))
C.atg_unique_dim_consecutive(ptr, self, cdim, creturnInverse, creturnCounts)
}
|
|
// AtgUnsqueeze wraps the C function atg_unsqueeze; dim is bit-reinterpreted to C int64_t.
func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg_unsqueeze(ptr, self, cdim)
}
|
|
// AtgUnsqueeze_ wraps the C function atg_unsqueeze_ (in-place variant).
func AtgUnsqueeze_(ptr *Ctensor, self Ctensor, dim int64){
cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
C.atg_unsqueeze_(ptr, self, cdim)
}
|
|
// AtgUpsampleBicubic2d wraps the C function atg_upsample_bicubic2d; outputSizeData is
// passed as pointer-to-first-element plus length, scale factors as C double.
func AtgUpsampleBicubic2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bicubic2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBicubic2dBackward wraps the C function atg_upsample_bicubic2d_backward.
func AtgUpsampleBicubic2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bicubic2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBicubic2dBackwardOut wraps the C function atg_upsample_bicubic2d_backward_out,
// writing into the pre-allocated gradInput tensor.
func AtgUpsampleBicubic2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bicubic2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBicubic2dOut wraps the C function atg_upsample_bicubic2d_out, writing into out.
func AtgUpsampleBicubic2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bicubic2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBilinear2d wraps the C function atg_upsample_bilinear2d.
func AtgUpsampleBilinear2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bilinear2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBilinear2dBackward wraps the C function atg_upsample_bilinear2d_backward.
func AtgUpsampleBilinear2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bilinear2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBilinear2dBackwardOut wraps the C function atg_upsample_bilinear2d_backward_out,
// writing into the pre-allocated gradInput tensor.
func AtgUpsampleBilinear2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bilinear2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleBilinear2dOut wraps the C function atg_upsample_bilinear2d_out, writing into out.
func AtgUpsampleBilinear2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_bilinear2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW)
}
|
|
// AtgUpsampleLinear1d wraps the C function atg_upsample_linear1d.
func AtgUpsampleLinear1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_linear1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales)
}
|
|
// AtgUpsampleLinear1dBackward wraps the C function atg_upsample_linear1d_backward.
func AtgUpsampleLinear1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_linear1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales)
}
|
|
// AtgUpsampleLinear1dBackwardOut wraps the C function atg_upsample_linear1d_backward_out,
// writing into the pre-allocated gradInput tensor.
func AtgUpsampleLinear1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_linear1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales)
}
|
|
// AtgUpsampleLinear1dOut wraps the C function atg_upsample_linear1d_out, writing into out.
func AtgUpsampleLinear1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_linear1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales)
}
|
|
// AtgUpsampleNearest1d wraps the C function atg_upsample_nearest1d.
func AtgUpsampleNearest1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_nearest1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscales)
}
|
|
// AtgUpsampleNearest1dBackward wraps the C function atg_upsample_nearest1d_backward.
func AtgUpsampleNearest1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_nearest1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales)
}
|
|
// AtgUpsampleNearest1dBackwardOut wraps the C function atg_upsample_nearest1d_backward_out,
// writing into the pre-allocated gradInput tensor.
func AtgUpsampleNearest1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_nearest1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales)
}
|
|
// AtgUpsampleNearest1dOut wraps the C function atg_upsample_nearest1d_out, writing into out.
func AtgUpsampleNearest1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cscales := *(*C.double)(unsafe.Pointer(&scales))
C.atg_upsample_nearest1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscales)
}
|
|
// AtgUpsampleNearest2d wraps the C function atg_upsample_nearest2d.
func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest2dBackward wraps the C function atg_upsample_nearest2d_backward.
func AtgUpsampleNearest2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest2dBackwardOut wraps the C function atg_upsample_nearest2d_backward_out,
// writing into the pre-allocated gradInput tensor.
func AtgUpsampleNearest2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest2dOut wraps the C function atg_upsample_nearest2d_out, writing into out.
func AtgUpsampleNearest2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest3d wraps the C function atg_upsample_nearest3d.
func AtgUpsampleNearest3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest3dBackward wraps the C function atg_upsample_nearest3d_backward.
func AtgUpsampleNearest3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest3dBackwardOut wraps the C function atg_upsample_nearest3d_backward_out,
// writing into the pre-allocated gradInput tensor.
func AtgUpsampleNearest3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW)
}
|
|
// AtgUpsampleNearest3dOut wraps the C function atg_upsample_nearest3d_out, writing into out.
func AtgUpsampleNearest3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_nearest3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW)
}
|
|
// AtgUpsampleTrilinear3d wraps the C function atg_upsample_trilinear3d.
func AtgUpsampleTrilinear3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_trilinear3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW)
}
|
|
// AtgUpsampleTrilinear3dBackward wraps the C function atg_upsample_trilinear3d_backward.
func AtgUpsampleTrilinear3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
C.atg_upsample_trilinear3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW)
}
|
|
func AtgUpsampleTrilinear3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0]))
|
|
cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen))
|
|
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
|
|
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
|
|
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
|
|
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
|
|
C.atg_upsample_trilinear3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW)
|
|
}
|
|
func AtgUpsampleTrilinear3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){
|
|
coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
|
|
coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
|
|
calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
|
|
cscalesD := *(*C.double)(unsafe.Pointer(&scalesD))
|
|
cscalesH := *(*C.double)(unsafe.Pointer(&scalesH))
|
|
cscalesW := *(*C.double)(unsafe.Pointer(&scalesW))
|
|
C.atg_upsample_trilinear3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW)
|
|
}
|
|
// AtgValues is a thin cgo wrapper that forwards ptr and self to the C
// function atg_values, which writes its result through ptr.
func AtgValues(ptr *Ctensor, self Ctensor){


C.atg_values(ptr, self)


}
|
|
func AtgVar(ptr *Ctensor, self Ctensor, unbiased int32){
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
C.atg_var(ptr, self, cunbiased)
|
|
}
|
|
func AtgVar1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_var1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
|
|
}
|
|
func AtgVarMean(ptr *Ctensor, self Ctensor, unbiased int32){
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
C.atg_var_mean(ptr, self, cunbiased)
|
|
}
|
|
func AtgVarMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_var_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
|
|
}
|
|
func AtgVarOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
|
|
cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
|
|
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
|
|
cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
|
|
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
|
|
C.atg_var_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
|
|
}
|
|
func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_view(ptr, self, csizeDataPtr, csizeLen)
|
|
}
|
|
// AtgViewAs is a thin cgo wrapper that forwards ptr, self and other to the C
// function atg_view_as, which writes its result through ptr.
func AtgViewAs(ptr *Ctensor, self Ctensor, other Ctensor){


C.atg_view_as(ptr, self, other)


}
|
|
|
|
// AtgWhere1 is a thin cgo wrapper that forwards condition, self and other to
// the C function atg_where1, which writes its result through ptr.
func AtgWhere1(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){


C.atg_where1(ptr, condition, self, other)


}
|
|
// AtgZero_ is a thin cgo wrapper that forwards ptr and self to the C function
// atg_zero_ (the trailing underscore follows the libtorch convention for
// in-place variants); the result handle is written through ptr.
func AtgZero_(ptr *Ctensor, self Ctensor){


C.atg_zero_(ptr, self)


}
|
|
func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
|
|
coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
|
|
C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
|
|
}
|
|
// AtgZerosLike is a thin cgo wrapper that forwards ptr and self to the C
// function atg_zeros_like, which writes its result through ptr.
func AtgZerosLike(ptr *Ctensor, self Ctensor){


C.atg_zeros_like(ptr, self)


}
|
|
func AtgZerosOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){
|
|
csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
|
|
csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
|
|
C.atg_zeros_out(ptr, out, csizeDataPtr, csizeLen)
|
|
}
|