generated new APIs

sugarme 2021-07-22 19:00:53 +10:00
parent bc12fc8605
commit 49bb517da3
7 changed files with 21732 additions and 24770 deletions

View File

@@ -25,6 +25,7 @@ let excluded_functions =
; "backward"
; "set_data"
; "_amp_non_finite_check_and_unscale_"
; "_amp_foreach_non_finite_check_and_unscale_"
; "_cummin_helper"
; "_cummax_helper"
; "retain_grad"

View File

@@ -106,11 +106,6 @@ cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
C.atg__aminmax_dim(ptr, self, cdim, ckeepdim)
}
-func Atg_AmpForeachNonFiniteCheckAndUnscale_(ptr *Ctensor, selfData []Ctensor, selfLen int, foundInf Ctensor, invScale Ctensor){
-cselfDataPtr := (*Ctensor)(unsafe.Pointer(&selfData[0]))
-cselfLen := *(*C.int)(unsafe.Pointer(&selfLen))
-C.atg__amp_foreach_non_finite_check_and_unscale_(ptr, cselfDataPtr, cselfLen, foundInf, invScale)
-}
func Atg_AmpUpdateScale_(ptr *Ctensor, self Ctensor, growthTracker Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64){
cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor))
cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor))
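The generated wrappers reinterpret Go values as their C counterparts through unsafe.Pointer rather than converting them, relying on float64 and C.double (and int64 and C.int64_t) sharing an in-memory representation. A standalone sketch of the idiom, using a stand-in C function since libtorch is not assumed here:

```go
package main

/*
static double twice(double x) { return 2.0 * x; }
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	scaleGrowthFactor := 2.0
	// Same cast the generated wrappers perform before calling into C:
	cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor))
	fmt.Println(float64(C.twice(cscaleGrowthFactor))) // prints 4
}
```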

File diff suppressed because it is too large

View File

@@ -105,8 +105,13 @@ void at_set_num_threads(int n_threads);
void at_free(tensor);
-void at_run_backward(tensor *tensors, int ntensors, tensor *inputs, int ninputs,
-                     tensor *outputs, int keep_graph, int create_graph);
+void at_run_backward(tensor *tensors,
+                     int ntensors,
+                     tensor *inputs,
+                     int ninputs,
+                     tensor *outputs,
+                     int keep_graph,
+                     int create_graph);
optimizer ato_adam(double learning_rate, double beta1, double beta2,
double weight_decay);
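The hunk above only reflows the at_run_backward prototype to one parameter per line; the signature itself is unchanged. A hedged sketch of how a cgo layer might wrap it, assuming the libtch package context (the torch_api.h cgo preamble, the Ctensor alias, and import "unsafe"); the Go function name is hypothetical, not the repository's actual wrapper:

```go
// Run backward from a set of root tensors, collecting gradients for the
// given inputs into outputs (hypothetical wrapper for illustration).
func AtRunBackward(tensors, inputs, outputs []Ctensor, keepGraph, createGraph int) {
	ctensors := (*Ctensor)(unsafe.Pointer(&tensors[0]))
	cinputs := (*Ctensor)(unsafe.Pointer(&inputs[0]))
	coutputs := (*Ctensor)(unsafe.Pointer(&outputs[0]))
	C.at_run_backward(ctensors, C.int(len(tensors)),
		cinputs, C.int(len(inputs)),
		coutputs, C.int(keepGraph), C.int(createGraph))
}
```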
@@ -131,7 +136,7 @@ void ato_step(optimizer);
void ato_free(optimizer);
// TT. APIs for learning rate scheduler
-void ato_set_learning_rates(optimizer, double* learning_rates, int lrs_num);
+void ato_set_learning_rates(optimizer, double *learning_rates, int lrs_num);
int64_t ato_param_group_num(optimizer);
void ato_get_learning_rates(optimizer, double *lrs, int *ngroup);
void ato_add_param_group(optimizer, tensor *params, int param_num);
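Together these declarations form a small learning-rate-scheduler surface: ato_param_group_num reports how many parameter groups the optimizer holds, and ato_set_learning_rates / ato_get_learning_rates write and read one rate per group. A hedged usage sketch, assuming the libtch package context and a Coptimizer alias for the C optimizer handle; setUniformLR is a hypothetical helper and the optimizer itself is created elsewhere (e.g. via ato_adam):

```go
// Set every param group of opt to the same learning rate.
func setUniformLR(opt Coptimizer, lr float64) {
	ngroup := int64(C.ato_param_group_num(opt)) // one learning rate per param group
	lrs := make([]float64, ngroup)
	for i := range lrs {
		lrs[i] = lr
	}
	// Reinterpret the []float64 backing array as double* for the C call.
	C.ato_set_learning_rates(opt, (*C.double)(unsafe.Pointer(&lrs[0])), C.int(ngroup))
}
```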

View File

@@ -212,12 +212,6 @@ void atg__aminmax_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
)
}
-void atg__amp_foreach_non_finite_check_and_unscale_(tensor *out__, tensor *self_data, int self_len, tensor found_inf, tensor inv_scale) {
-PROTECT(
-auto outputs__ = torch::_amp_foreach_non_finite_check_and_unscale_(of_carray_tensor(self_data, self_len), *found_inf, *inv_scale);
-)
-}
void atg__amp_update_scale_(tensor *out__, tensor self, tensor growth_tracker, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
PROTECT(
auto outputs__ = torch::_amp_update_scale_(*self, *growth_tracker, *found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);

View File

@@ -30,7 +30,6 @@ void atg__add_relu_(tensor *, tensor self, tensor other);
void atg__add_relu_out(tensor *, tensor out, tensor self, tensor other);
void atg__aminmax(tensor *, tensor self);
void atg__aminmax_dim(tensor *, tensor self, int64_t dim, int keepdim);
-void atg__amp_foreach_non_finite_check_and_unscale_(tensor *, tensor *self_data, int self_len, tensor found_inf, tensor inv_scale);
void atg__amp_update_scale_(tensor *, tensor self, tensor growth_tracker, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
void atg__baddbmm_mkl_(tensor *, tensor self, tensor batch1, tensor batch2);
void atg__bmm(tensor *, tensor self, tensor mat2, int deterministic);
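After this commit the AMP update-scale op is still reachable through the generated Atg_AmpUpdateScale_ wrapper shown in the second file, while the foreach unscale op has to be handled outside the generated surface. A hedged sketch of a call site, assuming the libtch package context; updateAmpScale and the numeric arguments are illustrative, not defaults taken from this code:

```go
// Update an AMP loss scale in place via the surviving generated wrapper.
func updateAmpScale(ptr *Ctensor, self, growthTracker, foundInf Ctensor) {
	// growth factor 2.0, backoff factor 0.5, growth interval 2000:
	// example values only, chosen for illustration.
	Atg_AmpUpdateScale_(ptr, self, growthTracker, foundInf, 2.0, 0.5, 2000)
}
```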

File diff suppressed because it is too large