Skip to content

Commit 0475528

Browse files
committed
fix: lint fixes
1 parent b7465b1 commit 0475528

File tree

5 files changed

+14
-12
lines changed

5 files changed

+14
-12
lines changed

onnxruntime/core/providers/openvino/backend_manager.cc

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,10 @@ BackendManager::BackendManager(SessionContext& session_context,
4444
shared_context_{shared_context} {
4545
subgraph_context_.is_ep_ctx_graph = ep_ctx_handle_.CheckForOVEPCtxNodeInGraph(subgraph);
4646

47+
bool cpu_or_gpu = session_context_.device_type.find("CPU") != std::string::npos ||
48+
session_context_.device_type.find("GPU") != std::string::npos;
49+
bool npu = session_context_.device_type.find("NPU") != std::string::npos;
50+
4751
subgraph_context_.model_precision = [&](const GraphViewer& graph_viewer) {
4852
// return empty if graph has no inputs or if types are not one of FP32/FP16
4953
// else assume the type of the first input
@@ -105,9 +109,7 @@ BackendManager::BackendManager(SessionContext& session_context,
105109
if (ModelHasSymbolicInputDims(subgraph)) {
106110
subgraph_context_.has_dynamic_input_shape = true;
107111
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] Model has symbolic input dims";
108-
if ((session_context_.device_type.find("CPU") != std::string::npos ||
109-
session_context_.device_type.find("GPU") != std::string::npos ||
110-
(session_context_.device_type.find("NPU") != std::string::npos && session_context_.enable_causallm)) &&
112+
if ((cpu_or_gpu || (npu && session_context_.enable_causallm)) &&
111113
!session_context_.disable_dynamic_shapes) {
112114
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] Starting backend initialization. "
113115
<< "Creating backend Dynamic Shapes";
@@ -481,6 +483,9 @@ BackendManager::ReWriteBatchDimWithOne(const ONNX_NAMESPACE::ModelProto& model_p
481483
void BackendManager::Compute(OrtKernelContext* context) {
482484
Ort::KernelContext ctx(context);
483485
std::chrono::high_resolution_clock::time_point start_compute, end_compute;
486+
bool cpu_or_gpu = session_context_.device_type.find("CPU") != std::string::npos ||
487+
session_context_.device_type.find("GPU") != std::string::npos;
488+
bool npu = session_context_.device_type.find("NPU") != std::string::npos;
484489
#ifdef OPENVINO_FIL_ENABLED
485490
static bool fil_enabled = true;
486491
if (fil_enabled) {
@@ -494,10 +499,7 @@ void BackendManager::Compute(OrtKernelContext* context) {
494499
// disable_dynamic_shapes is always set to true for OV NPU plugin.
495500
if (subgraph_context_.has_dynamic_input_shape &&
496501
!session_context_.disable_dynamic_shapes &&
497-
(session_context_.device_type.find("CPU") != std::string::npos ||
498-
session_context_.device_type.find("GPU") != std::string::npos ||
499-
(session_context_.device_type.find("NPU") != std::string::npos &&
500-
session_context_.enable_causallm))) {
502+
(cpu_or_gpu || (npu && session_context_.enable_causallm))) {
501503
concrete_backend_->Infer(context);
502504
} else if (subgraph_context_.has_dynamic_input_shape) {
503505
std::vector<std::vector<int64_t>> tensor_shapes = GetInputTensorShapes(ctx);

onnxruntime/core/providers/openvino/ibackend.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ class IBackend {
1717
virtual void Infer(OrtKernelContext* context) = 0;
1818
virtual ov::CompiledModel GetOVCompiledModel() = 0;
1919
virtual ~IBackend() = default;
20-
virtual void RewindKVCache(size_t index) {};
20+
virtual void RewindKVCache(size_t index) {}
2121
};
2222
using ptr_stream_t = std::unique_ptr<std::istream>;
2323
class BackendFactory {

onnxruntime/core/providers/openvino/ov_interface.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -392,7 +392,7 @@ StatefulOVInferRequest::StatefulOVInferRequest(ov::InferRequest infer_request, s
392392

393393
void StatefulOVInferRequest::PreProcessInferRequest() {
394394
// Workaround: Setting the value here as it cannot be set at the ORT GenAI layer currently.
395-
// TODO: Address this issue and implement the fix at the appropriate layer.
395+
// TODO(ankit): Address this issue and implement the fix at the appropriate layer.
396396
ov::Tensor beam_idx = ov::Tensor(ov::element::i32, {1});
397397
std::fill_n(beam_idx.data<int32_t>(), 1, 0);
398398
ovInfReq.set_tensor("beam_idx", beam_idx);

onnxruntime/core/providers/openvino/ov_interface.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,7 @@ class OVInferRequest {
136136
ov::InferRequest& GetNewObj() {
137137
return ovInfReq;
138138
}
139-
virtual void RewindKVCache(size_t index) {};
139+
virtual void RewindKVCache(size_t index) {}
140140
};
141141

142142
class StatefulOVInferRequest : public OVInferRequest {

onnxruntime/core/providers/openvino/ov_stateful_patch_utils.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
// Copyright (C) Intel Corporation
22
// Licensed under the MIT License
33

4-
#include "ov_stateful_patch_utils.h"
4+
#include "core/providers/openvino/ov_stateful_patch_utils.h"
55

66
namespace onnxruntime {
77
namespace openvino_ep {
@@ -155,7 +155,7 @@ void PatchStatefulDecoder(std::shared_ptr<ov::Model> model) {
155155
}
156156

157157
// By default, batch is the 0 - th but chatglm uses 1 - st dimension as batch
158-
// TODO: Deduce from a model via ordinal reshape(? ) and topology
158+
// TODO(ryan): Deduce from a model via ordinal reshape(? ) and topology
159159
// batch_dim = 1 if config.model_type == "chatglm" and not hasattr(config, "rope_ratio") else 0
160160
auto batch_dim = 0;
161161

0 commit comments

Comments
 (0)