Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 84b97e4

Browse files
committed Aug 10, 2023
Use enable_training_ops instead of enable_training_core
1 parent 20ff81f commit 84b97e4

File tree

2 files changed

+7
-7
lines changed

2 files changed

+7
-7
lines changed
 

‎onnxruntime/core/providers/cpu/nn/batch_norm.h

Lines changed: 5 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -47,7 +47,7 @@ class BatchNorm : public OpKernel {
4747
}
4848

4949
if (is_train_) {
50-
#ifdef ENABLE_TRAINING_CORE
50+
#ifdef ENABLE_TRAINING_OPS
5151
momentum_ = op_kernel_info.GetAttrOrDefault<float>("momentum", 0.9f);
5252
ORT_ENFORCE(is_spatial_, "Training mode only supports spatial BN");
5353
#else
@@ -84,7 +84,7 @@ class BatchNorm : public OpKernel {
8484
// calculate sample_size (including all channels)
8585
size_t sample_size_incl_all_channels = sample_size * C;
8686

87-
#ifdef ENABLE_TRAINING_CORE
87+
#ifdef ENABLE_TRAINING_OPS
8888
AllocatorPtr alloc;
8989
ORT_RETURN_IF_ERROR(p_op_kernel_context->GetTempSpaceAllocator(&alloc));
9090

@@ -111,7 +111,7 @@ class BatchNorm : public OpKernel {
111111
ConstEigenVectorArrayMap<T> scale_arr(scale->Data<T>(), is_spatial_ ? C : sample_size_incl_all_channels);
112112
ConstEigenVectorArrayMap<T> bias_arr(B->Data<T>(), is_spatial_ ? C : sample_size_incl_all_channels);
113113

114-
#ifdef ENABLE_TRAINING_CORE
114+
#ifdef ENABLE_TRAINING_OPS
115115
// Note that we only support spatial BN for training
116116
if (is_train_) {
117117
EigenVectorArrayMap<T> saved_mean_arr(saved_mean->MutableData<T>(), C);
@@ -162,7 +162,7 @@ class BatchNorm : public OpKernel {
162162
ConstEigenVectorArrayMap<T> var_arr(var->Data<T>(), is_spatial_ ? C : sample_size_incl_all_channels);
163163
inv_std = (var_arr + epsilon_).sqrt().inverse();
164164
} else {
165-
#ifdef ENABLE_TRAINING_CORE
165+
#ifdef ENABLE_TRAINING_OPS
166166
EigenVectorArrayMap<T> saved_inv_std_arr(saved_inv_std->MutableData<T>(), C);
167167
saved_inv_std_arr = (saved_inv_std_arr + epsilon_).inverse().sqrt();
168168
inv_std = saved_inv_std_arr;
@@ -171,7 +171,7 @@ class BatchNorm : public OpKernel {
171171

172172
// If we're training, do batch normalization based on computation from this batch
173173
ConstEigenVectorArrayMap<T> mean_arr(
174-
#ifdef ENABLE_TRAINING_CORE
174+
#ifdef ENABLE_TRAINING_OPS
175175
!is_train_ ? mean->Data<T>() : saved_mean->Data<T>(),
176176
#else
177177
mean->Data<T>(),

‎onnxruntime/test/providers/cpu/nn/batch_norm_op_test.cc

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -846,7 +846,7 @@ TEST(BatchNormTest, BatchNorm2d_bfloat16) {
846846
#endif // USE_DNNL
847847

848848
// TODO fix flaky test for CUDA
849-
#ifdef ENABLE_TRAINING_CORE
849+
#ifdef ENABLE_TRAINING_OPS
850850
TEST(BatchNormTest, ForwardTrainingTestWithSavedOutputsOpset9) {
851851
// TODO: Unskip when fixed #41968513
852852
if (DefaultDmlExecutionProvider().get() != nullptr) {
@@ -936,7 +936,7 @@ TEST(BatchNormTest, ForwardTrainingTestOpset15) {
936936
{kCudaExecutionProvider, kRocmExecutionProvider,
937937
kTensorrtExecutionProvider, kOpenVINOExecutionProvider, kDnnlExecutionProvider});
938938
}
939-
#endif // ENABLE_TRAINING_CORE
939+
#endif // ENABLE_TRAINING_OPS
940940

941941
} // namespace test
942942
} // namespace onnxruntime

0 commit comments

Comments (0)
Please sign in to comment.