-
Notifications
You must be signed in to change notification settings - Fork 5.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Print the forward stack trace when a backward op produces nan/inf and FLAGS_check_nan_inf_level = 0 #52639
Changes from all commits
200406f
e1bfd38
3b29a43
a7ab44c
f907fd3
144d423
9d787d3
b4eb0c9
78714b7
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -296,6 +296,10 @@ class {} : public egr::GradNodeBase {{ | |
|
||
// Node Construction | ||
{} | ||
// Set for forward trace | ||
if (FLAGS_check_nan_inf) {{ | ||
{} | ||
}} | ||
// SetAttributes if needed | ||
{} | ||
// Set TensorWrappers for Forward Inputs if needed | ||
|
@@ -484,7 +488,25 @@ class {} : public egr::GradNodeBase {{ | |
}} | ||
}}""" | ||
|
||
CHECK_NAN_AND_INF_TEMPLATE = """ if (FLAGS_check_nan_inf) {{ egr::CheckTensorHasNanOrInf("{}", {}); }} | ||
CHECK_NAN_AND_INF_TEMPLATE_FORWARD = """ | ||
std::string forward_trace =""; | ||
if (FLAGS_check_nan_inf) {{ | ||
egr::CheckTensorHasNanOrInf("{}", {}); | ||
forward_trace = egr::Controller::Instance().GetPythonStack(); | ||
}} | ||
""" | ||
|
||
CHECK_NAN_AND_INF_TEMPLATE_BACKWARD = """ | ||
if (FLAGS_check_nan_inf) {{ | ||
try{{ | ||
egr::CheckTensorHasNanOrInf("{}", {}); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 那样就需要修改CheckTensorHasNanOrInf了,代码改的比较多了 There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 改 |
||
}} catch(...) {{ | ||
LOG(WARNING) << "There are nan/inf in ({})"; | ||
auto forward_trace = GetForwardTrace(); | ||
std::cout<<forward_trace<<std::endl; | ||
std::rethrow_exception(std::current_exception()); | ||
}} | ||
}} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 注意字符串模板中的代码格式,另外为啥要用两个花括号? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这是自动代码生成需要用两层花括号 |
||
""" | ||
|
||
inplace_optional_out_type_map = { | ||
|
@@ -1047,11 +1069,15 @@ def GenerateNodeCreationCodes(self, for_backward=False): | |
|
||
node_event_name = forward_api_name + " node_creation" | ||
node_creation_event_str = f"{indent}paddle::platform::RecordEvent node_creation_record_event(\"{node_event_name}\", paddle::platform::TracerEventType::OperatorInner, 1);\n" | ||
set_forward_trace = ( | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 能否设置FLAGS_check_nan_inf为false时不设置? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 可以的,后续和上面的临时变量问题统一修改 |
||
f"{indent} grad_node->SetForwardTrace(forward_trace);" | ||
) | ||
if not for_backward: | ||
self.node_creation_str = FORWARD_BODY_TEMPLATE.format( | ||
node_creation_event_str, | ||
pass_stop_gradient_args_str, | ||
node_construction_str, | ||
set_forward_trace, | ||
set_attributes_str, | ||
set_input_tensor_wrappers_str, | ||
set_grad_out_meta_str, | ||
|
@@ -1426,7 +1452,7 @@ def GenerateForwardDefinitionAndDeclaration(self, is_inplaced): | |
) | ||
|
||
# Check Nan and Inf | ||
check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE.format( | ||
check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE_FORWARD.format( | ||
function_name, "api_result" | ||
) | ||
|
||
|
@@ -2320,8 +2346,8 @@ def GenerateNodeDefinition( | |
{indent}{grad_api_namespace}{backward_api_name}({grad_api_args_str});""" | ||
|
||
# Check Nan and Inf | ||
check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE.format( | ||
backward_api_name, "returns" | ||
check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE_BACKWARD.format( | ||
backward_api_name, "returns", backward_api_name | ||
) | ||
|
||
# Prepare for Node Creation if Necessary | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -121,7 +121,10 @@ def FindParsingFunctionFromAttributeType(atype): | |
NOAMP_DYGRAPH_FUNCTION_TEMPLATE = "decltype({}({})) out = {}({});" | ||
|
||
|
||
FUNCTION_SET_DEVICE_TEMPLATE = """{} if (paddle::platform::is_gpu_place(place)) {{ | ||
FUNCTION_SET_DEVICE_TEMPLATE = """{} | ||
LOG(INFO)<<"this is SetPythonStack"; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这个模板是用于哪里的? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 是的 这个我下个PR再修改 |
||
SetPythonStack(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这里需要加 There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 不需要,这个调用里面我加了判断 |
||
if (paddle::platform::is_gpu_place(place)) {{ | ||
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) | ||
phi::backends::gpu::SetDeviceId(place.device); | ||
VLOG(4) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device; | ||
|
@@ -170,7 +173,6 @@ def FindParsingFunctionFromAttributeType(atype): | |
#include "paddle/fluid/pybind/eager.h" | ||
#include "paddle/fluid/eager/amp_utils.h" | ||
#include "paddle/fluid/eager/eager_amp_auto_cast.h" | ||
|
||
namespace paddle {{ | ||
namespace pybind {{ | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,29 @@ | ||
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
|
||
|
||
import paddle | ||
|
||
|
||
def main(): | ||
paddle.set_flags({"FLAGS_check_nan_inf": 1, "FLAGS_check_nan_inf_level": 0}) | ||
cpu_place = paddle.CPUPlace() | ||
x = paddle.to_tensor([1, 0.0, 3], stop_gradient=False, place=cpu_place) | ||
y = paddle.to_tensor([0.2, 0.0, 0.5], place=cpu_place) | ||
z = paddle.pow(x, y) | ||
paddle.autograd.backward([z]) | ||
|
||
|
||
if __name__ == "__main__": | ||
main() |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -78,6 +78,13 @@ def setUp(self): | |
|
||
|
||
class TestNanInfCheckResult(unittest.TestCase): | ||
def setUp(self): | ||
self._python_interp = sys.executable | ||
if os.getenv('WITH_COVERAGE', 'OFF') == 'ON': | ||
self._python_interp += " -m coverage run --branch -p" | ||
|
||
self.env = os.environ.copy() | ||
|
||
def generate_inputs(self, shape, dtype="float32"): | ||
data = np.random.random(size=shape).astype(dtype) | ||
# [-10, 10) | ||
|
@@ -141,6 +148,25 @@ def _check_num_nan_inf(use_cuda): | |
if paddle.fluid.core.is_compiled_with_cuda(): | ||
_check_num_nan_inf(use_cuda=True) | ||
|
||
def test_check_stack(self): | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 这个测试其实没有必要再费劲加到这个单测里面了。之所以构造这么一个复杂的测试方式,是因为之前CUDA抛出的异常捕获不到。调用栈报错,应该可以直接用 There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 不行呀,前向的调用栈是print出来的,无法try... except捕获 |
||
self._python_interp += " check_nan_inf_backward_stack.py" | ||
cmd = self._python_interp | ||
proc = subprocess.Popen( | ||
cmd.split(" "), | ||
stdout=subprocess.PIPE, | ||
stderr=subprocess.PIPE, | ||
env=self.env, | ||
) | ||
|
||
out, err = proc.communicate() | ||
returncode = proc.returncode | ||
|
||
print(out) | ||
print(err) | ||
|
||
# In Python 3, type(out + err) is 'bytes', so compare against a bytes literal | ||
assert (out + err).find(b' z = paddle.pow(x, y)') != -1 | ||
|
||
def check_nan_inf_level(self, use_cuda, dtype): | ||
shape = [8, 8] | ||
x_np, y_np = self.generate_inputs(shape, dtype) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
是否可以不使用临时变量减少下平凡的构造和析构开销
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
可以的 我下个PR统一修改