Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion paddle/fluid/framework/ir/constant_folding_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ class Node;
* When an op's inputs and outputs are determined before feeding data to the
* model, we can remove this op from the model. This ConstantFolding pass can
* remove all such ops.
*
*/

namespace paddle {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -180,8 +180,7 @@ void Conv2dTransFilterDilationsNxNTo1x1Pass::conv2d_dilation_trans(
graph, "has_quant_info", "var_quant_scales", new_weights_scale);
} else {
VLOG(3) << "Transfilter only support float32/float16/int8 dtype of "
"weights -- do "
"nothing and break.";
"weights -- do nothing and break.";
return;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -386,7 +386,7 @@ struct FlowGraph {
// Since VarDesc doesn't store layout, in pir we set all layout to
// NCHW after translation. However, we need the real layout to decide
// if we need to alter the operation and value. Here we start from the
// operation who have a dertermined layout and spread its layout to
// operations that have a determined layout and spread their layout to
// their outputs and inputs recursively.
std::queue<Node> q;
for (auto& n : mutable_nodes) {
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/args_mapper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ void ArgMaxMinMapper(PyObject* args,
bool* keepdims,
bool* flatten,
phi::DataType* dtype) {
// The python params are (x, axis,keepdim,dtype,name) which haven't flatten
// The python params are (x, axis, keepdim, dtype, name), which lack flatten.
// The _C_ops params are (x, axis, keepdim, flatten, dtype), which have flatten
// but lack name. We should parse the python params and convert them to the
// _C_ops params.
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/imperative.cc
Original file line number Diff line number Diff line change
Expand Up @@ -753,7 +753,7 @@ void BindImperative(py::module *m_ptr) {
// c++
// STL and python set/list/dict involve a copy operation that
// prevents pass-by-reference semantics, so it is ok to swap.
// The reaseon why not directly pass
// The reason why not directly pass
// std::shared_ptr<std::unordered_set<std::string>>
// is that pybind11 forbid shared_ptr<T> where T is not custom
// type.
Expand Down
2 changes: 1 addition & 1 deletion test/ipu/test_warpctc_op_ipu.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def setUp(self):
self.set_op_attrs()

def set_training(self):
# ctcloss only support training currently.
# ctc_loss only supports training currently.
self.is_training = True
self.epoch = 1

Expand Down
2 changes: 1 addition & 1 deletion test/legacy_test/test_warpctc_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -681,7 +681,7 @@ def test_class_api(self):

np.testing.assert_allclose(loss_pd, loss_np, rtol=1e-05, atol=1)

def test_eager_ctcloss(self):
def test_eager_ctc_loss(self):
def test_functional_api():
self.batch_size = 4
self.num_classes = CUDA_BLOCK_SIZE + 2
Expand Down
Loading