diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index 1e11e1fac3b622..28f6dfd9a2ed8f 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -22,7 +22,7 @@
 PHI_DEFINE_EXPORTED_bool(convert_all_blocks,
                          true,
                          "Convert all blocks in program into SSAgraphs");
-PHI_DEFINE_EXPORTED_bool(all_blocks_convert_trt,
+PHI_DEFINE_EXPORTED_bool(convert_all_blocks_trt,
                          false,
                          "Convert all blocks'Ops into TensorRT Ops");
 
diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h
index 8ff23cdac04455..0949c49b4594b4 100644
--- a/paddle/fluid/framework/ir/graph.h
+++ b/paddle/fluid/framework/ir/graph.h
@@ -28,7 +28,7 @@ limitations under the License. */
 #include "paddle/utils/any.h"
 
 PD_DECLARE_bool(convert_all_blocks);
-PD_DECLARE_bool(all_blocks_convert_trt);
+PD_DECLARE_bool(convert_all_blocks_trt);
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc
index 3fcc23c2573f6e..7548d31a2cbab2 100644
--- a/paddle/fluid/framework/ir/graph_helper.cc
+++ b/paddle/fluid/framework/ir/graph_helper.cc
@@ -30,7 +30,7 @@ limitations under the License. */
 #endif
 #include "paddle/common/flags.h"
 PD_DECLARE_bool(convert_all_blocks);
-PD_DECLARE_bool(all_blocks_convert_trt);
+PD_DECLARE_bool(convert_all_blocks_trt);
 PHI_DEFINE_EXPORTED_string(print_sub_graph_dir,
                            "",
                            "FLAGS_print_sub_graph_dir is used "
diff --git a/paddle/fluid/framework/ir/pass.cc b/paddle/fluid/framework/ir/pass.cc
index 3285034b7d3c48..97eff724dd902e 100644
--- a/paddle/fluid/framework/ir/pass.cc
+++ b/paddle/fluid/framework/ir/pass.cc
@@ -131,19 +131,19 @@ Graph *Pass::Apply(Graph *graph) const {
   std::vector<std::string> subgraph_passes;
   bool use_xpu = Has("use_xpu") && Get<bool>("use_xpu");
   bool use_tensorrt = Has("use_tensorrt") && Get<bool>("use_tensorrt");
-  bool all_blocks_convert = false;
+  bool convert_all_blocks = false;
   if (use_xpu) {
     subgraph_passes = xpu_support_subgraph_passes;
-    all_blocks_convert = FLAGS_convert_all_blocks;
+    convert_all_blocks = FLAGS_convert_all_blocks;
   } else if (use_tensorrt) {
     subgraph_passes = trt_support_subgraph_passes;
-    all_blocks_convert =
-        FLAGS_all_blocks_convert_trt && FLAGS_convert_all_blocks;
+    convert_all_blocks =
+        FLAGS_convert_all_blocks_trt && FLAGS_convert_all_blocks;
   } else {
     subgraph_passes = gpu_support_subgraph_passes;
-    all_blocks_convert = FLAGS_convert_all_blocks;
+    convert_all_blocks = FLAGS_convert_all_blocks;
   }
-  if (all_blocks_convert && graph->IsMainGraph() &&
+  if (convert_all_blocks && graph->IsMainGraph() &&
       (std::count(subgraph_passes.begin(), subgraph_passes.end(), Type()) ||
        std::count(support_subgraph_generate_passes.begin(),
                   support_subgraph_generate_passes.end(),
diff --git a/paddle/fluid/framework/ir/set_subgraph_edge_pass.cc b/paddle/fluid/framework/ir/set_subgraph_edge_pass.cc
index 915c565a8c699e..ad5231e2c002ea 100644
--- a/paddle/fluid/framework/ir/set_subgraph_edge_pass.cc
+++ b/paddle/fluid/framework/ir/set_subgraph_edge_pass.cc
@@ -27,7 +27,7 @@ namespace paddle::framework::ir {
 
 // Delete dequantize_linear_op, then dequantize weight
 void SetSubgraphEdge::ApplyImpl(Graph *graph) const {
-  if (!(FLAGS_all_blocks_convert_trt && FLAGS_convert_all_blocks)) {
+  if (!(FLAGS_convert_all_blocks_trt && FLAGS_convert_all_blocks)) {
    VLOG(3) << "Running set_subgraph_edge_pass need set environment variables: "
               "export FLAGS_convert_all_blocks = true";
    return;
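
Note (not part of the patch): after this rename, TensorRT all-blocks conversion
is gated on both flags, as seen in Pass::Apply and SetSubgraphEdge::ApplyImpl
above. A minimal sketch of that gating condition, assuming the two flags are in
scope via PD_DECLARE_bool exactly as in the diff; the helper name is
hypothetical and only illustrates the condition:

    PD_DECLARE_bool(convert_all_blocks);
    PD_DECLARE_bool(convert_all_blocks_trt);

    // Hypothetical helper: mirrors the condition used in pass.cc and
    // set_subgraph_edge_pass.cc -- both flags must be true for TensorRT
    // conversion of all blocks to run.
    static bool ConvertAllBlocksForTrtEnabled() {
      return FLAGS_convert_all_blocks_trt && FLAGS_convert_all_blocks;
    }

At runtime these exported flags are typically enabled through environment
variables of the same name; the VLOG hint in set_subgraph_edge_pass.cc itself
suggests "export FLAGS_convert_all_blocks = true", and presumably
"export FLAGS_convert_all_blocks_trt=true" enables the renamed flag the same way.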