Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 3 additions & 20 deletions openviking/server/bootstrap.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,26 +132,9 @@ def main():
print(e, file=sys.stderr)
sys.exit(1)

# Ensure Ollama is running if configured
try:
from openviking_cli.utils.ollama import detect_ollama_in_config, ensure_ollama_for_server

ov_config = OpenVikingConfigSingleton.get_instance()
uses_ollama, ollama_host, ollama_port = detect_ollama_in_config(ov_config)
if uses_ollama:
result = ensure_ollama_for_server(ollama_host, ollama_port)
if result.success:
print(f"Ollama is running at {ollama_host}:{ollama_port}")
else:
print(
f"Warning: Ollama not available at {ollama_host}:{ollama_port}. "
f"Embedding/VLM may fail. ({result.message})",
file=sys.stderr,
)
if result.stderr_output:
print(f" Ollama stderr: {result.stderr_output}", file=sys.stderr)
except Exception as e:
print(f"Warning: Ollama pre-flight check failed: {e}", file=sys.stderr)
from openviking.server.preflight import run_preflight_checks

run_preflight_checks(OpenVikingConfigSingleton.get_instance())

# Override with command line arguments
if args.host is not None:
Expand Down
45 changes: 45 additions & 0 deletions openviking/server/preflight.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Copyright (c) 2026 Beijing Volcano Engine Technology Co., Ltd.
# SPDX-License-Identifier: AGPL-3.0
"""Environment preflight checks for OpenViking HTTP Server.

Encapsulates startup-time environment detection (Ollama availability, etc.)
so that ``bootstrap.py`` can stay focused on argument parsing and server
lifecycle. New environment checks should be added as additional internal
helpers invoked from :func:`run_preflight_checks`.
"""

from __future__ import annotations

import sys


def _check_ollama(ov_config) -> None:
    """Verify (and, if configured, start) Ollama before the server boots.

    Prints a stdout notice when Ollama is reachable; on failure prints
    stderr warnings and returns normally — unexpected errors are handled
    by the caller, not here.
    """
    # Lazy import: keeps module import free of CLI-utility side effects.
    from openviking_cli.utils.ollama import detect_ollama_in_config, ensure_ollama_for_server

    uses_ollama, host, port = detect_ollama_in_config(ov_config)
    if not uses_ollama:
        # Config never references Ollama — nothing to verify.
        return

    start_result = ensure_ollama_for_server(host, port)
    if not start_result.success:
        print(
            f"Warning: Ollama not available at {host}:{port}. "
            f"Embedding/VLM may fail. ({start_result.message})",
            file=sys.stderr,
        )
        if start_result.stderr_output:
            print(f"  Ollama stderr: {start_result.stderr_output}", file=sys.stderr)
        return

    print(f"Ollama is running at {host}:{port}")


def run_preflight_checks(ov_config) -> None:
"""Run all server-startup environment checks.

*ov_config* is an :class:`OpenVikingConfig` instance (typically obtained
via ``OpenVikingConfigSingleton.get_instance()``).
"""
try:
_check_ollama(ov_config)
except Exception as e:
print(f"Warning: Ollama pre-flight check failed: {e}", file=sys.stderr)
61 changes: 61 additions & 0 deletions tests/server/test_preflight.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Copyright (c) 2026 Beijing Volcano Engine Technology Co., Ltd.
# SPDX-License-Identifier: AGPL-3.0
"""Tests for the server preflight module."""

from __future__ import annotations

from unittest.mock import patch

from openviking.server.preflight import run_preflight_checks
from openviking_cli.utils.ollama import OllamaStartResult


class TestRunPreflightChecks:
    """Behavioral tests for :func:`run_preflight_checks`."""

    # Patch targets: preflight imports these lazily from the ollama utils
    # module, so patching at the source module is sufficient.
    _DETECT = "openviking_cli.utils.ollama.detect_ollama_in_config"
    _ENSURE = "openviking_cli.utils.ollama.ensure_ollama_for_server"

    def test_noop_when_ollama_not_configured(self, capsys):
        with patch(self._ENSURE) as ensure, patch(self._DETECT) as detect:
            detect.return_value = (False, "localhost", 11434)
            run_preflight_checks(ov_config=object())

        ensure.assert_not_called()
        captured = capsys.readouterr()
        assert captured.out == ""
        assert captured.err == ""

    def test_ensures_ollama_when_configured(self, capsys):
        with patch(self._ENSURE) as ensure, patch(self._DETECT) as detect:
            detect.return_value = (True, "localhost", 11434)
            ensure.return_value = OllamaStartResult(success=True, message="running")
            run_preflight_checks(ov_config=object())

        ensure.assert_called_once_with("localhost", 11434)
        assert "Ollama is running at localhost:11434" in capsys.readouterr().out

    def test_reports_failure_without_crashing(self, capsys):
        failure = OllamaStartResult(
            success=False,
            message="Ollama at gpu-server:11434 is not reachable.",
            stderr_output="boom",
        )
        with patch(self._ENSURE) as ensure, patch(self._DETECT) as detect:
            detect.return_value = (True, "gpu-server", 11434)
            ensure.return_value = failure
            run_preflight_checks(ov_config=object())

        err = capsys.readouterr().err
        assert "Warning: Ollama not available at gpu-server:11434" in err
        assert "Ollama stderr: boom" in err

    def test_swallows_unexpected_exceptions(self, capsys):
        with patch(self._DETECT, side_effect=RuntimeError("kaboom")):
            run_preflight_checks(ov_config=object())

        err = capsys.readouterr().err
        assert "Warning: Ollama pre-flight check failed: kaboom" in err
Loading