[wip][compiled autograd] runtime wrapper for verbose debugging #125417

Status: Closed · 2 commits
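This WIP change wraps the compiled autograd callable in a Python-level `runtime_wrapper` so per-call debug code (here, logging the lifted hooks) runs at invocation time without touching the compiled graph: `end_capture` now returns a `(runtime_wrapper, compiled_fn)` pair, `CacheNode` caches both, and the C++ entry point invokes the wrapper with the compiled function as its first argument.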
torch/_dynamo/compiled_autograd.py (7 additions, 1 deletion)
```diff
@@ -220,7 +220,13 @@ def end_capture(self, outputs):
             "compiled_autograd_graph",
             payload_fn=lambda: graph.print_readable(print_output=False),
         )
-        return self.compiler_fn(graph)
+
+        def runtime_wrapper(compiled_fn, inputs, sizes, hooks):
+            # insert debug code here
+            verbose_log.debug(f"lifted hooks={hooks}")
+            return compiled_fn(inputs, sizes, hooks)
+
+        return runtime_wrapper, self.compiler_fn(graph)
 
     def reorder_accumulate_grad_nodes(self):
         """
```
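With this change the capture step returns a `(runtime_wrapper, compiled_fn)` pair rather than the bare compiled function. A minimal sketch of how one might exercise this path, assuming the `enable` context manager in `torch._dynamo.compiled_autograd` and the `compiled_autograd_verbose` logging artifact that backs `verbose_log` (both expected in the tree this patch targets):

```python
# Sketch: drive the compiled autograd path so runtime_wrapper runs on each
# backward call. Running with TORCH_LOGS="compiled_autograd_verbose" should
# surface the verbose_log.debug output added above.
import torch
from torch._dynamo import compiled_autograd

model = torch.nn.Linear(4, 4)
loss = model(torch.randn(2, 4)).sum()

with compiled_autograd.enable(torch.compile):
    loss.backward()  # first call captures + compiles; the wrapper then logs the lifted hooks
```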
torch/csrc/dynamo/python_compiled_autograd.cpp (5 additions, 2 deletions)
Expand Up @@ -228,6 +228,7 @@ struct CacheNode {
std::vector<CacheKeyBuffer> key_storage;
std::vector<SizeInput> expected_sizes;

THPObjectPtr runtime_wrapper;
THPObjectPtr compiled_fn;
};

```diff
@@ -504,7 +505,9 @@ CacheNode* _compiled_autograd_impl(
       }
     }
 
-    cache->compiled_fn = check(call_end_capture(py_compiler, state.outputs));
+    PyObject* res = check(call_end_capture(py_compiler, state.outputs));
+    cache->runtime_wrapper = Py_NewRef(PyTuple_GetItem(res, 0));
+    cache->compiled_fn = Py_NewRef(PyTuple_GetItem(res, 1));
     state.debug_asserts();
   } // End cache miss region
```
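Note on ownership: `PyTuple_GetItem` returns a borrowed reference, while `THPObjectPtr` takes ownership and decrefs on destruction, so the tuple items are wrapped in `Py_NewRef` before being stored; stashing the borrowed pointers directly would over-release them once the result tuple is freed.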

```diff
@@ -552,7 +555,7 @@ variable_list compiled_autograd(
       &hooks);
 
   THPObjectPtr pyresult(check(PyObject_CallFunctionObjArgs(
-      cache->compiled_fn.get(), inputs.get(), sizes.get(), hooks.get(), NULL)));
+      cache->runtime_wrapper.get(), cache->compiled_fn.get(), inputs.get(), sizes.get(), hooks.get(), NULL)));
   variable_list outputs = THPVariable_UnpackList(pyresult);
   TORCH_INTERNAL_ASSERT(outputs.size() == output_edges.size());
   return outputs;
```
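Taken together, the engine no longer calls the compiled function directly: it calls the cached wrapper with the compiled function prepended to the original arguments. A self-contained Python sketch of that contract (all names here are toy stand-ins, not the real engine API):

```python
# Toy model of the new dispatch: the cache holds (runtime_wrapper, compiled_fn)
# and every invocation goes through the wrapper, which can emit debug output
# before delegating to the compiled function.
def runtime_wrapper(compiled_fn, inputs, sizes, hooks):
    print(f"lifted hooks={hooks}")  # stand-in for verbose_log.debug
    return compiled_fn(inputs, sizes, hooks)

def compiled_fn(inputs, sizes, hooks):  # stand-in for the compiled graph
    return [x * 2 for x in inputs]

# engine side, previously: compiled_fn(inputs, sizes, hooks)
outputs = runtime_wrapper(compiled_fn, [1.0, 2.0], sizes=[], hooks=[])
print(outputs)  # [2.0, 4.0]
```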