From 1f8834ade07b4706e481cc59d20a5da672c36d7a Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Fri, 2 Apr 2021 15:46:53 +0800 Subject: [PATCH] Fix the nan bug when passing all zero values into clip_by_norm_op. (#30777) (#32038) If all input gradients are zero, the output of clip_by_norm will be inf or nan. This PR fixes that bug. --- paddle/fluid/operators/clip_by_norm_op.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/clip_by_norm_op.h b/paddle/fluid/operators/clip_by_norm_op.h index a8d1e8e4661..fb21d9fec90 100644 --- a/paddle/fluid/operators/clip_by_norm_op.h +++ b/paddle/fluid/operators/clip_by_norm_op.h @@ -81,7 +81,12 @@ class ClipByNormKernel : public framework::OpKernel { *context.template device_context().eigen_device(); auto temp = (x_norm <= max_norm).template cast(); - auto scaling = temp + (static_cast(1) - temp) * max_norm / x_norm; + auto epsilon = + ((x_norm <= static_cast(1e-30)).all().template cast()) * + static_cast(1e-6); + + auto scaling = + temp + (static_cast(1) - temp) * max_norm / (x_norm + epsilon); Eigen::array one_dim{{1}}; Eigen::DSizes m_dsize(input->numel()); if (context.GetPlace() == platform::CPUPlace()) { -- GitLab