4 changes: 2 additions & 2 deletions docs/docs/cheatsheet.md
@@ -67,7 +67,7 @@ print(f"Final Predicted Answer (after ReAct process): {result.answer}")

```python
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
-dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
+dspy.configure(rm=colbertv2_wiki17_abstracts)

#Define Retrieve Module
retriever = dspy.Retrieve(k=3)
@@ -450,7 +450,7 @@ asyncio.run(dspy_program(question="What is DSPy"))

```python
import dspy
-dspy.settings.configure(track_usage=True)
+dspy.configure(track_usage=True)

result = dspy.ChainOfThought(BasicQA)(question="What is 2+2?")
print(f"Token usage: {result.get_lm_usage()}")
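For context, a minimal sketch of the retrieval flow this cheatsheet section documents, using the renamed `dspy.configure` (the query string is illustrative, and the public ColBERTv2 demo endpoint may not always be up):

```python
import dspy

# Point DSPy's retrieval model at the hosted ColBERTv2 wiki17 index (demo endpoint).
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")
dspy.configure(rm=colbertv2_wiki17_abstracts)

# Retrieve the top-3 passages for a query.
retriever = dspy.Retrieve(k=3)
top_passages = retriever("When was the first FIFA World Cup held?").passages
for passage in top_passages:
    print(passage)
```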
2 changes: 1 addition & 1 deletion docs/docs/learn/programming/language_models.md
@@ -229,7 +229,7 @@ To enable the Responses API, just set `model_type="responses"` when creating the
import dspy

# Configure DSPy to use the Responses API for your language model
-dspy.settings.configure(
+dspy.configure(
lm=dspy.LM(
"openai/gpt-5-mini",
model_type="responses",
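The hunk cuts off mid-call; a hedged sketch of a complete Responses API configuration follows (the `temperature` and `max_tokens` values are illustrative assumptions, not part of the diff):

```python
import dspy

# Route requests through OpenAI's Responses API instead of Chat Completions.
dspy.configure(
    lm=dspy.LM(
        "openai/gpt-5-mini",
        model_type="responses",
        temperature=1.0,   # illustrative; not from the diff
        max_tokens=16000,  # illustrative; not from the diff
    )
)
```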
6 changes: 3 additions & 3 deletions docs/docs/learn/programming/modules.md
@@ -260,7 +260,7 @@ print(hop(claim="Stephen Curry is the best 3 pointer shooter ever in the human h
DSPy provides built-in tracking of language model usage across all module calls. To enable tracking:

```python
-dspy.settings.configure(track_usage=True)
+dspy.configure(track_usage=True)
```

Once enabled, you can access usage statistics from any `dspy.Prediction` object:
@@ -275,7 +275,7 @@ The usage data is returned as a dictionary that maps each language model name to
import dspy

# Configure DSPy with tracking enabled
-dspy.settings.configure(
+dspy.configure(
lm=dspy.LM("openai/gpt-4o-mini", cache=False),
track_usage=True
)
@@ -326,7 +326,7 @@ When using DSPy's caching features (either in-memory or on-disk via litellm), ca

```python
# Enable caching
-dspy.settings.configure(
+dspy.configure(
lm=dspy.LM("openai/gpt-4o-mini", cache=True),
track_usage=True
)
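For context, a minimal end-to-end sketch of the usage tracking these hunks document (assumes `OPENAI_API_KEY` is set; the signature string is illustrative):

```python
import dspy

# Disable caching so each call actually hits the LM and reports usage.
dspy.configure(
    lm=dspy.LM("openai/gpt-4o-mini", cache=False),
    track_usage=True,
)

result = dspy.ChainOfThought("question -> answer")(question="What is 2+2?")
# Maps each language model name to its token-usage dict for this prediction.
print(result.get_lm_usage())
```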
4 changes: 2 additions & 2 deletions docs/docs/tutorials/audio/index.ipynb
@@ -117,7 +117,7 @@
"Now let's configure our LLM which can process input audio. \n",
"\n",
"```python\n",
"dspy.settings.configure(lm=dspy.LM(model='gpt-4o-mini-audio-preview-2024-12-17'))\n",
"dspy.configure(lm=dspy.LM(model='gpt-4o-mini-audio-preview-2024-12-17'))\n",
"```\n",
"\n",
"Note: Using `dspy.Audio` in signatures allows passing in audio directly to the model. "
@@ -332,7 +332,7 @@
" audio = generate_dspy_audio(raw_line, out.openai_instruction)\n",
" return dspy.Prediction(audio=audio)\n",
" \n",
"dspy.settings.configure(lm=dspy.LM(model='gpt-4o-mini'))"
"dspy.configure(lm=dspy.LM(model='gpt-4o-mini'))"
]
},
{
6 changes: 3 additions & 3 deletions docs/docs/tutorials/cache/index.md
@@ -25,7 +25,7 @@ import time

os.environ["OPENAI_API_KEY"] = "{your_openai_key}"

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"), track_usage=True)
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"), track_usage=True)

predict = dspy.Predict("question->answer")

@@ -167,7 +167,7 @@ import time

os.environ["OPENAI_API_KEY"] = "{your_openai_key}"

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

predict = dspy.Predict("question->answer")

@@ -193,7 +193,7 @@ from hashlib import sha256

os.environ["OPENAI_API_KEY"] = "{your_openai_key}"

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

class CustomCache(dspy.clients.Cache):

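A minimal sketch of the timing experiment these cache hunks set up (assumes a valid OpenAI key; absolute timings will vary):

```python
import os
import time

import dspy

os.environ["OPENAI_API_KEY"] = "{your_openai_key}"

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
predict = dspy.Predict("question->answer")

# First call hits the API; the repeat should be served from cache.
start = time.time()
predict(question="Who invented the telephone?")
print(f"cold call: {time.time() - start:.2f}s")

start = time.time()
predict(question="Who invented the telephone?")
print(f"cached call: {time.time() - start:.2f}s")
```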
4 changes: 2 additions & 2 deletions docs/docs/tutorials/conversation_history/index.md
@@ -12,7 +12,7 @@ import os

os.environ["OPENAI_API_KEY"] = "{your_openai_api_key}"

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

class QA(dspy.Signature):
question: str = dspy.InputField()
@@ -121,7 +121,7 @@ For example:
```python
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


class QA(dspy.Signature):
6 changes: 3 additions & 3 deletions docs/docs/tutorials/deployment/index.md
@@ -7,7 +7,7 @@ Below, we'll assume you have the following simple DSPy program that you want to
```python
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy_program = dspy.ChainOfThought("question -> answer")
```

@@ -40,7 +40,7 @@ class Question(BaseModel):

# Configure your language model and 'asyncify' your DSPy program.
lm = dspy.LM("openai/gpt-4o-mini")
-dspy.settings.configure(lm=lm, async_max_workers=4) # default is 8
+dspy.configure(lm=lm, async_max_workers=4) # default is 8
dspy_program = dspy.ChainOfThought("question -> answer")
dspy_program = dspy.asyncify(dspy_program)

@@ -163,7 +163,7 @@ mlflow.set_tracking_uri("http://127.0.0.1:5000/")
mlflow.set_experiment("deploy_dspy_program")

lm = dspy.LM("openai/gpt-4o-mini")
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)

class MyProgram(dspy.Module):
def __init__(self):
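A hedged sketch of the FastAPI endpoint this deployment hunk is building toward (the route and response shape are illustrative assumptions):

```python
import dspy
from fastapi import FastAPI
from pydantic import BaseModel


class Question(BaseModel):
    text: str


# 'asyncify' wraps the synchronous DSPy program so it can serve async requests.
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"), async_max_workers=4)
dspy_program = dspy.asyncify(dspy.ChainOfThought("question -> answer"))

app = FastAPI()


@app.post("/predict")
async def predict(question: Question):
    result = await dspy_program(question=question.text)
    return {"answer": result.answer}
```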
4 changes: 2 additions & 2 deletions docs/docs/tutorials/entity_extraction/index.ipynb
@@ -172,7 +172,7 @@
"Specifically, we'll:\n",
"- Create a `PeopleExtraction` DSPy Signature to specify the input (`tokens`) and output (`extracted_people`) fields.\n",
"- Define a `people_extractor` program that uses DSPy's built-in `dspy.ChainOfThought` module to implement the `PeopleExtraction` signature. The program extracts entities referring to people from a list of input tokens using language model (LM) prompting.\n",
"- Use the `dspy.LM` class and `dspy.settings.configure()` method to configure the language model that DSPy will use when invoking the program."
"- Use the `dspy.LM` class and `dspy.configure()` method to configure the language model that DSPy will use when invoking the program."
]
},
{
@@ -208,7 +208,7 @@
"outputs": [],
"source": [
"lm = dspy.LM(model=\"openai/gpt-4o-mini\")\n",
"dspy.settings.configure(lm=lm)"
"dspy.configure(lm=lm)"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/tutorials/image_generation_prompting/index.ipynb
@@ -80,7 +80,7 @@
"from IPython.display import display\n",
"\n",
"lm = dspy.LM(model=\"gpt-4o-mini\", temperature=0.5)\n",
"dspy.settings.configure(lm=lm)"
"dspy.configure(lm=lm)"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/tutorials/program_of_thought/index.ipynb
@@ -69,7 +69,7 @@
"source": [
"llama31_70b = dspy.LM(\"openai/meta-llama/Meta-Llama-3-70b-Instruct\", api_base=\"API_BASE\", api_key=\"None\")\n",
"\n",
"dspy.settings.configure(lm=llama31_70b)"
"dspy.configure(lm=llama31_70b)"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/tutorials/saving/index.md
@@ -20,7 +20,7 @@ Let's say we have compiled a program with some data, and we want to save the pro
import dspy
from dspy.datasets.gsm8k import GSM8K, gsm8k_metric

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

gsm8k = GSM8K()
gsm8k_trainset = gsm8k.train[:10]
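For context, a minimal save/load round trip in the spirit of this tutorial (using `BootstrapFewShot` is an assumption here; the tutorial's actual optimizer sits in the elided lines):

```python
import dspy
from dspy.datasets.gsm8k import GSM8K, gsm8k_metric

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

gsm8k = GSM8K()
program = dspy.ChainOfThought("question -> answer")
optimizer = dspy.BootstrapFewShot(metric=gsm8k_metric)
compiled = optimizer.compile(program, trainset=gsm8k.train[:10])

# Save only the learned state (demos, instructions) as JSON...
compiled.save("compiled_program.json")

# ...and load it back into a freshly constructed program of the same shape.
loaded = dspy.ChainOfThought("question -> answer")
loaded.load("compiled_program.json")
```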
8 changes: 4 additions & 4 deletions docs/docs/tutorials/streaming/index.md
@@ -130,7 +130,7 @@ import asyncio
import dspy

lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)


class MyModule(dspy.Module):
@@ -204,7 +204,7 @@ import asyncio
import dspy

lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)


def fetch_user_info(user_name: str):
@@ -259,7 +259,7 @@ import asyncio
import dspy

lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)


class MyModule(dspy.Module):
@@ -371,7 +371,7 @@ import asyncio
import dspy

lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)


class MyModule(dspy.Module):
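A minimal sketch of the streaming pattern these hunks configure, assuming the documented `dspy.streamify`/`StreamListener` surface (the question is illustrative):

```python
import asyncio

import dspy

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False))

predict = dspy.Predict("question -> answer")
# Stream the 'answer' field chunk-by-chunk as it is generated.
stream_predict = dspy.streamify(
    predict,
    stream_listeners=[dspy.streaming.StreamListener(signature_field_name="answer")],
)


async def main():
    async for chunk in stream_predict(question="What is DSPy?"):
        if isinstance(chunk, dspy.streaming.StreamResponse):
            print(chunk.chunk, end="", flush=True)
        elif isinstance(chunk, dspy.Prediction):
            print("\nfinal:", chunk.answer)


asyncio.run(main())
```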
4 changes: 2 additions & 2 deletions dspy/adapters/types/history.py
@@ -22,7 +22,7 @@ class MySignature(dspy.Signature):
```
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

class MySignature(dspy.Signature):
question: str = dspy.InputField()
@@ -44,7 +44,7 @@ class MySignature(dspy.Signature):
```
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

class MySignature(dspy.Signature):
question: str = dspy.InputField()
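For context, a minimal sketch of the `dspy.History` usage this docstring describes (the question/answer content is illustrative):

```python
import dspy

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


class QA(dspy.Signature):
    question: str = dspy.InputField()
    history: dspy.History = dspy.InputField()
    answer: str = dspy.OutputField()


predict = dspy.Predict(QA)
history = dspy.History(
    messages=[{"question": "Is it raining?", "answer": "Yes, it is raining."}]
)
# The prior turn is replayed to the LM as conversation context.
result = predict(question="Should I bring an umbrella?", history=history)
print(result.answer)
```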
2 changes: 1 addition & 1 deletion dspy/datasets/dataloader.py
@@ -132,7 +132,7 @@ def from_rm(self, num_samples: int, fields: list[str], input_keys: list[str]) ->
)
except AttributeError:
raise ValueError(
"Retrieval module not found. Please set a retrieval module using `dspy.settings.configure`."
"Retrieval module not found. Please set a retrieval module using `dspy.configure`."
)

def sample(
2 changes: 1 addition & 1 deletion dspy/dsp/utils/settings.py
@@ -151,7 +151,7 @@ def _ensure_configure_allowed(self):

if not in_ipython and config_owner_async_task != asyncio.current_task():
raise RuntimeError(
"dspy.settings.configure(...) can only be called from the same async task that called it first. Please "
"dspy.configure(...) can only be called from the same async task that called it first. Please "
"use `dspy.context(...)` in other async tasks instead."
)

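A hedged sketch of the guidance in this error message: configure once in the main task, then use `dspy.context` in sibling tasks (the `temperature` override and `acall` usage are illustrative):

```python
import asyncio

import dspy

# Global configuration belongs to the main async task...
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


async def worker(question: str) -> str:
    # ...so other async tasks override settings with dspy.context
    # instead of calling dspy.configure again.
    with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", temperature=0.7)):
        result = await dspy.Predict("question -> answer").acall(question=question)
        return result.answer


async def main():
    answers = await asyncio.gather(worker("What is DSPy?"), worker("What is an LM?"))
    print(answers)


asyncio.run(main())
```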
2 changes: 1 addition & 1 deletion dspy/predict/best_of_n.py
@@ -29,7 +29,7 @@ def __init__(
```python
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

# Define a QA module with chain of thought
qa = dspy.ChainOfThought("question -> answer")
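For context, a minimal `BestOfN` sketch in the style of this docstring (the reward function is illustrative):

```python
import dspy

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

qa = dspy.ChainOfThought("question -> answer")


def one_word_answer(args, pred):
    # Reward 1.0 when the answer is a single word, else 0.0.
    return 1.0 if len(pred.answer.split()) == 1 else 0.0


# Sample up to 3 rollouts and keep the first one that meets the threshold.
best_of_3 = dspy.BestOfN(module=qa, N=3, reward_fn=one_word_answer, threshold=1.0)
print(best_of_3(question="What is the capital of Belgium?").answer)
```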
2 changes: 1 addition & 1 deletion dspy/predict/refine.py
@@ -67,7 +67,7 @@ def __init__(
```python
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

# Define a QA module with chain of thought
qa = dspy.ChainOfThought("question -> answer")
6 changes: 3 additions & 3 deletions dspy/propose/dataset_summary_generator.py
@@ -50,7 +50,7 @@ def create_dataset_summary(trainset, view_data_batch_size, prompt_model, log_fil
print("\nBootstrapping dataset summary (this will be used to generate instructions)...")
upper_lim = min(len(trainset), view_data_batch_size)
prompt_model = prompt_model if prompt_model else dspy.settings.lm
-with dspy.settings.context(lm=prompt_model):
+with dspy.context(lm=prompt_model):
observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)(examples=order_input_keys_in_string(trainset[0:upper_lim].__repr__()))
observations = observation["observations"]

@@ -68,7 +68,7 @@ def create_dataset_summary(trainset, view_data_batch_size, prompt_model, log_fil
if verbose:
print(f"b: {b}")
upper_lim = min(len(trainset), b+view_data_batch_size)
-with dspy.settings.context(lm=prompt_model):
+with dspy.context(lm=prompt_model):
output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)(prior_observations=observations, examples=order_input_keys_in_string(trainset[b:upper_lim].__repr__()))
if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE":
skips += 1
@@ -84,7 +84,7 @@ def create_dataset_summary(trainset, view_data_batch_size, prompt_model, log_fil
print(f"e {e}. using observations from past round for a summary.")

if prompt_model:
-with dspy.settings.context(lm=prompt_model):
+with dspy.context(lm=prompt_model):
summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations)
else:
summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations)
2 changes: 1 addition & 1 deletion dspy/propose/grounded_proposer.py
@@ -419,7 +419,7 @@ def propose_instruction_for_predictor(
temperature=self.init_temperature,
)

-with dspy.settings.context(lm=rollout_lm):
+with dspy.context(lm=rollout_lm):
proposed_instruction = instruction_generator(
demo_candidates=demo_candidates,
pred_i=pred_i,
2 changes: 1 addition & 1 deletion dspy/retrievers/weaviate_rm.py
@@ -34,7 +34,7 @@ class WeaviateRM(dspy.Retrieve):
llm = dspy.Cohere(model="command-r-plus", api_key=api_key)
weaviate_client = weaviate.connect_to_[local, wcs, custom, embedded]("your-path-here")
retriever_model = WeaviateRM("my_collection_name", weaviate_client=weaviate_client)
-dspy.settings.configure(lm=llm, rm=retriever_model)
+dspy.configure(lm=llm, rm=retriever_model)

retrieve = dspy.Retrieve(k=1)
topK_passages = retrieve("what are the stages in planning, sanctioning and execution of public works").passages
6 changes: 3 additions & 3 deletions dspy/streaming/streamify.py
@@ -64,7 +64,7 @@ def streamify(
import asyncio
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
# Create the program and wrap it with streaming functionality
program = dspy.streamify(dspy.Predict("q->a"))

@@ -88,7 +88,7 @@ async def use_streaming():
import asyncio
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

class MyStatusMessageProvider(StatusMessageProvider):
def module_start_status_message(self, instance, inputs):
@@ -121,7 +121,7 @@ async def use_streaming():
import asyncio
import dspy

dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False))
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False))

# Create the program and wrap it with streaming functionality
predict = dspy.Predict("question->answer, reasoning")
4 changes: 2 additions & 2 deletions dspy/teleprompt/bootstrap.py
@@ -182,13 +182,13 @@ def _bootstrap_one_example(self, example, round_idx=0):
predictor_cache = {}

try:
-with dspy.settings.context(trace=[], **self.teacher_settings):
+with dspy.context(trace=[], **self.teacher_settings):
lm = dspy.settings.lm
# Use a fresh rollout with temperature=1.0 to bypass caches.
lm = lm.copy(rollout_id=round_idx, temperature=1.0) if round_idx > 0 else lm
new_settings = {"lm": lm} if round_idx > 0 else {}

-with dspy.settings.context(**new_settings):
+with dspy.context(**new_settings):
for name, predictor in teacher.named_predictors():
predictor_cache[name] = predictor.demos
predictor.demos = [x for x in predictor.demos if x != example]
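A hedged sketch of how `teacher_settings` reaches the `dspy.context(...)` call shown in this hunk (the metric and teacher model are illustrative assumptions):

```python
import dspy
from dspy.teleprompt import BootstrapFewShot

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


def exact_match(example, pred, trace=None):
    return example.answer.lower() == pred.answer.lower()


# teacher_settings flow into the dspy.context(...) call above, so demos
# are bootstrapped under a stronger teacher LM than the student uses.
optimizer = BootstrapFewShot(
    metric=exact_match,
    teacher_settings={"lm": dspy.LM("openai/gpt-4o")},
)
# compiled = optimizer.compile(student_program, trainset=trainset)
```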
6 changes: 3 additions & 3 deletions dspy/teleprompt/copro_optimizer.py
@@ -20,7 +20,7 @@

Note that this teleprompter takes in the following parameters:

-* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. dspy.settings.configure(lm=task_model)).
+* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. dspy.configure(lm=task_model)).
* metric: The task metric used for optimization.
* breadth: The number of new prompts to generate at each iteration. Default=10.
* depth: The number of times we should ask our prompt model to generate new prompts, with the history of the past prompts as input. Default=3.
@@ -156,7 +156,7 @@ def compile(self, student, *, trainset, eval_kwargs):
basic_instruction = self._get_signature(predictor).instructions
basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra["prefix"]
if self.prompt_model:
-with dspy.settings.context(lm=self.prompt_model):
+with dspy.context(lm=self.prompt_model):
instruct = dspy.Predict(
BasicGenerateInstruction,
n=self.breadth - 1,
@@ -306,7 +306,7 @@ def compile(self, student, *, trainset, eval_kwargs):

# Generate next batch of potential prompts to optimize, with previous attempts as input
if self.prompt_model:
-with dspy.settings.context(lm=self.prompt_model):
+with dspy.context(lm=self.prompt_model):
instr = dspy.Predict(
GenerateInstructionGivenAttempts,
n=self.breadth,
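For context, a hedged sketch of driving COPRO with a dedicated `prompt_model`, which is what the `dspy.context(lm=self.prompt_model)` blocks above consume (the metric and model choices are illustrative):

```python
import dspy
from dspy.teleprompt import COPRO

dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


def exact_match(example, pred, trace=None):
    return example.answer.lower() == pred.answer.lower()


# prompt_model writes candidate instructions inside the dspy.context(...)
# blocks above; when omitted, COPRO falls back to the globally configured LM.
optimizer = COPRO(
    prompt_model=dspy.LM("openai/gpt-4o"),
    metric=exact_match,
    breadth=10,
    depth=3,
)
# compiled = optimizer.compile(program, trainset=trainset, eval_kwargs={"num_threads": 8})
```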