|
| 1 | +import importlib |
| 2 | +import sys |
| 3 | + |
| 4 | + |
def test_model_cost_map_url_from_env(monkeypatch):
    """Ensure `LITELLM_MODEL_COST_MAP_URL` is picked up at import time.

    Sets the env var, stubs out the network fetch, reloads the `litellm`
    package so its module-level initialization re-runs, and asserts that
    both the URL and the fetched model-cost map made it into the package
    namespace.
    """
    test_url = "https://example.com/test_model_cost_map.json"

    # A minimal model cost map we expect to be loaded
    model_json = {
        "my-test-model": {
            "input_cost_per_token": 0.123,
            "output_cost_per_token": 0.456,
            "litellm_provider": "openai",
            "mode": "chat",
        }
    }

    class DummyResp:
        """Stand-in for an httpx.Response carrying our fixture payload."""

        def raise_for_status(self):
            return None

        def json(self):
            return model_json

    # Point litellm at our test URL
    monkeypatch.setenv("LITELLM_MODEL_COST_MAP_URL", test_url)

    # Mock httpx.get to return our dummy response. Accept any call
    # signature so the stub survives changes to how litellm invokes
    # httpx.get (extra kwargs like headers/follow_redirects, or a
    # positional timeout, would break a fixed-signature lambda).
    import httpx

    monkeypatch.setattr(httpx, "get", lambda *args, **kwargs: DummyResp())

    # (Re)import the litellm package so its top-level code re-runs and
    # picks up the env var. `import` alone is a no-op if the module is
    # already cached in sys.modules, so an unconditional reload covers
    # both the fresh-import and already-imported cases.
    import litellm

    importlib.reload(litellm)

    # The package should have picked up the env var and loaded our model map
    assert litellm.model_cost_map_url == test_url
    assert "my-test-model" in litellm.model_cost
    assert litellm.model_cost["my-test-model"]["input_cost_per_token"] == 0.123
0 commit comments