diff --git a/docs/docs/cheatsheet.md b/docs/docs/cheatsheet.md
index bb0fd58a8d..ee12c5ed10 100644
--- a/docs/docs/cheatsheet.md
+++ b/docs/docs/cheatsheet.md
@@ -67,7 +67,7 @@ print(f"Final Predicted Answer (after ReAct process): {result.answer}")
 ```python
 colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
-dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
+dspy.configure(rm=colbertv2_wiki17_abstracts)
 
 #Define Retrieve Module
 retriever = dspy.Retrieve(k=3)
@@ -450,7 +450,7 @@ asyncio.run(dspy_program(question="What is DSPy"))
 ```python
 import dspy
 
-dspy.settings.configure(track_usage=True)
+dspy.configure(track_usage=True)
 
 result = dspy.ChainOfThought(BasicQA)(question="What is 2+2?")
 print(f"Token usage: {result.get_lm_usage()}")
diff --git a/docs/docs/learn/programming/language_models.md b/docs/docs/learn/programming/language_models.md
index b6adeb1b53..4152c26dee 100644
--- a/docs/docs/learn/programming/language_models.md
+++ b/docs/docs/learn/programming/language_models.md
@@ -229,7 +229,7 @@ To enable the Responses API, just set `model_type="responses"` when creating the
 import dspy
 
 # Configure DSPy to use the Responses API for your language model
-dspy.settings.configure(
+dspy.configure(
     lm=dspy.LM(
         "openai/gpt-5-mini",
         model_type="responses",
diff --git a/docs/docs/learn/programming/modules.md b/docs/docs/learn/programming/modules.md
index 2f3fff2d9b..5d340d467f 100644
--- a/docs/docs/learn/programming/modules.md
+++ b/docs/docs/learn/programming/modules.md
@@ -260,7 +260,7 @@ print(hop(claim="Stephen Curry is the best 3 pointer shooter ever in the human h
 DSPy provides built-in tracking of language model usage across all module calls. To enable tracking:
 
 ```python
-dspy.settings.configure(track_usage=True)
+dspy.configure(track_usage=True)
 ```
 
 Once enabled, you can access usage statistics from any `dspy.Prediction` object:
@@ -275,7 +275,7 @@ The usage data is returned as a dictionary that maps each language model name to
 import dspy
 
 # Configure DSPy with tracking enabled
-dspy.settings.configure(
+dspy.configure(
     lm=dspy.LM("openai/gpt-4o-mini", cache=False),
     track_usage=True
 )
@@ -326,7 +326,7 @@ When using DSPy's caching features (either in-memory or on-disk via litellm), ca
 ```python
 # Enable caching
-dspy.settings.configure(
+dspy.configure(
     lm=dspy.LM("openai/gpt-4o-mini", cache=True),
     track_usage=True
 )
diff --git a/docs/docs/tutorials/audio/index.ipynb b/docs/docs/tutorials/audio/index.ipynb
index 7ae09eb369..41969510f9 100644
--- a/docs/docs/tutorials/audio/index.ipynb
+++ b/docs/docs/tutorials/audio/index.ipynb
@@ -117,7 +117,7 @@
    "Now let's configure our LLM which can process input audio. \n",
    "\n",
    "```python\n",
-    "dspy.settings.configure(lm=dspy.LM(model='gpt-4o-mini-audio-preview-2024-12-17'))\n",
+    "dspy.configure(lm=dspy.LM(model='gpt-4o-mini-audio-preview-2024-12-17'))\n",
    "```\n",
    "\n",
    "Note: Using `dspy.Audio` in signatures allows passing in audio directly to the model. "
@@ -332,7 +332,7 @@
    "        audio = generate_dspy_audio(raw_line, out.openai_instruction)\n",
    "        return dspy.Prediction(audio=audio)\n",
    "    \n",
-    "dspy.settings.configure(lm=dspy.LM(model='gpt-4o-mini'))"
+    "dspy.configure(lm=dspy.LM(model='gpt-4o-mini'))"
    ]
   },
   {
diff --git a/docs/docs/tutorials/cache/index.md b/docs/docs/tutorials/cache/index.md
index 6345cc789d..f2cc35727a 100644
--- a/docs/docs/tutorials/cache/index.md
+++ b/docs/docs/tutorials/cache/index.md
@@ -25,7 +25,7 @@ import time
 
 os.environ["OPENAI_API_KEY"] = "{your_openai_key}"
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"), track_usage=True)
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"), track_usage=True)
 
 predict = dspy.Predict("question->answer")
 
@@ -167,7 +167,7 @@ import time
 
 os.environ["OPENAI_API_KEY"] = "{your_openai_key}"
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
 predict = dspy.Predict("question->answer")
 
@@ -193,7 +193,7 @@ from hashlib import sha256
 
 os.environ["OPENAI_API_KEY"] = "{your_openai_key}"
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
 
 class CustomCache(dspy.clients.Cache):
diff --git a/docs/docs/tutorials/conversation_history/index.md b/docs/docs/tutorials/conversation_history/index.md
index a3b8bee4cc..cf2195230c 100644
--- a/docs/docs/tutorials/conversation_history/index.md
+++ b/docs/docs/tutorials/conversation_history/index.md
@@ -12,7 +12,7 @@ import os
 
 os.environ["OPENAI_API_KEY"] = "{your_openai_api_key}"
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
 class QA(dspy.Signature):
     question: str = dspy.InputField()
@@ -121,7 +121,7 @@ For example:
 ```python
 import dspy
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
 
 class QA(dspy.Signature):
diff --git a/docs/docs/tutorials/deployment/index.md b/docs/docs/tutorials/deployment/index.md
index bbc7d77a60..fea3d0c4f4 100644
--- a/docs/docs/tutorials/deployment/index.md
+++ b/docs/docs/tutorials/deployment/index.md
@@ -7,7 +7,7 @@ Below, we'll assume you have the following simple DSPy program that you want to
 ```python
 import dspy
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
 dspy_program = dspy.ChainOfThought("question -> answer")
 ```
@@ -40,7 +40,7 @@ class Question(BaseModel):
 # Configure your language model and 'asyncify' your DSPy program.
 lm = dspy.LM("openai/gpt-4o-mini")
-dspy.settings.configure(lm=lm, async_max_workers=4) # default is 8
+dspy.configure(lm=lm, async_max_workers=4) # default is 8
 
 dspy_program = dspy.ChainOfThought("question -> answer")
 dspy_program = dspy.asyncify(dspy_program)
@@ -163,7 +163,7 @@ mlflow.set_tracking_uri("http://127.0.0.1:5000/")
 mlflow.set_experiment("deploy_dspy_program")
 
 lm = dspy.LM("openai/gpt-4o-mini")
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)
 
 class MyProgram(dspy.Module):
     def __init__(self):
diff --git a/docs/docs/tutorials/entity_extraction/index.ipynb b/docs/docs/tutorials/entity_extraction/index.ipynb
index f8c5c25f40..df74ee0be3 100644
--- a/docs/docs/tutorials/entity_extraction/index.ipynb
+++ b/docs/docs/tutorials/entity_extraction/index.ipynb
@@ -172,7 +172,7 @@
    "Specifically, we'll:\n",
    "- Create a `PeopleExtraction` DSPy Signature to specify the input (`tokens`) and output (`extracted_people`) fields.\n",
    "- Define a `people_extractor` program that uses DSPy's built-in `dspy.ChainOfThought` module to implement the `PeopleExtraction` signature. The program extracts entities referring to people from a list of input tokens using language model (LM) prompting.\n",
-    "- Use the `dspy.LM` class and `dspy.settings.configure()` method to configure the language model that DSPy will use when invoking the program."
+    "- Use the `dspy.LM` class and `dspy.configure()` function to configure the language model that DSPy will use when invoking the program."
    ]
   },
   {
@@ -208,7 +208,7 @@
    "outputs": [],
    "source": [
     "lm = dspy.LM(model=\"openai/gpt-4o-mini\")\n",
-    "dspy.settings.configure(lm=lm)"
+    "dspy.configure(lm=lm)"
    ]
   },
   {
diff --git a/docs/docs/tutorials/image_generation_prompting/index.ipynb b/docs/docs/tutorials/image_generation_prompting/index.ipynb
index a4bee30af1..cc266699ee 100644
--- a/docs/docs/tutorials/image_generation_prompting/index.ipynb
+++ b/docs/docs/tutorials/image_generation_prompting/index.ipynb
@@ -80,7 +80,7 @@
    "from IPython.display import display\n",
    "\n",
    "lm = dspy.LM(model=\"gpt-4o-mini\", temperature=0.5)\n",
-    "dspy.settings.configure(lm=lm)"
+    "dspy.configure(lm=lm)"
    ]
   },
   {
diff --git a/docs/docs/tutorials/program_of_thought/index.ipynb b/docs/docs/tutorials/program_of_thought/index.ipynb
index 87f2848b18..fd0d3a8e0e 100644
--- a/docs/docs/tutorials/program_of_thought/index.ipynb
+++ b/docs/docs/tutorials/program_of_thought/index.ipynb
@@ -69,7 +69,7 @@
    "source": [
     "llama31_70b = dspy.LM(\"openai/meta-llama/Meta-Llama-3-70b-Instruct\", api_base=\"API_BASE\", api_key=\"None\")\n",
     "\n",
-    "dspy.settings.configure(lm=llama31_70b)"
+    "dspy.configure(lm=llama31_70b)"
    ]
   },
   {
diff --git a/docs/docs/tutorials/saving/index.md b/docs/docs/tutorials/saving/index.md
index 0069001e42..1500731409 100644
--- a/docs/docs/tutorials/saving/index.md
+++ b/docs/docs/tutorials/saving/index.md
@@ -20,7 +20,7 @@ Let's say we have compiled a program with some data, and we want to save the pro
 import dspy
 from dspy.datasets.gsm8k import GSM8K, gsm8k_metric
 
-dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
 gsm8k = GSM8K()
 gsm8k_trainset = gsm8k.train[:10]
diff --git a/docs/docs/tutorials/streaming/index.md b/docs/docs/tutorials/streaming/index.md
index 8782424d45..8c740dacb6 100644
--- a/docs/docs/tutorials/streaming/index.md
+++ b/docs/docs/tutorials/streaming/index.md
@@ -130,7 +130,7 @@ import asyncio
 import dspy
 
 lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)
 
 
 class MyModule(dspy.Module):
@@ -204,7 +204,7 @@ import asyncio
 import dspy
 
 lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)
 
 
 def fetch_user_info(user_name: str):
@@ -259,7 +259,7 @@ import asyncio
 import dspy
 
 lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)
 
 
 class MyModule(dspy.Module):
@@ -371,7 +371,7 @@ import asyncio
 import dspy
 
 lm = dspy.LM("openai/gpt-4o-mini", cache=False)
-dspy.settings.configure(lm=lm)
+dspy.configure(lm=lm)
 
 
 class MyModule(dspy.Module):
diff --git a/dspy/adapters/types/history.py b/dspy/adapters/types/history.py
index 65867c55b8..2c39d5c4ab 100644
--- a/dspy/adapters/types/history.py
+++ b/dspy/adapters/types/history.py
@@ -22,7 +22,7 @@ class MySignature(dspy.Signature):
         ```
         import dspy
 
-        dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+        dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
         class MySignature(dspy.Signature):
             question: str = dspy.InputField()
@@ -44,7 +44,7 @@ class MySignature(dspy.Signature):
         ```
         import dspy
 
-        dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+        dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
         class MySignature(dspy.Signature):
             question: str = dspy.InputField()
diff --git a/dspy/datasets/dataloader.py b/dspy/datasets/dataloader.py
index 559553b2b5..45f3236b2d 100644
--- a/dspy/datasets/dataloader.py
+++ b/dspy/datasets/dataloader.py
@@ -132,7 +132,7 @@ def from_rm(self, num_samples: int, fields: list[str], input_keys: list[str]) ->
             )
         except AttributeError:
             raise ValueError(
-                "Retrieval module not found. Please set a retrieval module using `dspy.settings.configure`."
+                "Retrieval module not found. Please set a retrieval module using `dspy.configure`."
             )
 
     def sample(
diff --git a/dspy/dsp/utils/settings.py b/dspy/dsp/utils/settings.py
index f5319a9f9f..1298f7fdad 100644
--- a/dspy/dsp/utils/settings.py
+++ b/dspy/dsp/utils/settings.py
@@ -151,7 +151,7 @@ def _ensure_configure_allowed(self):
 
         if not in_ipython and config_owner_async_task != asyncio.current_task():
             raise RuntimeError(
-                "dspy.settings.configure(...) can only be called from the same async task that called it first. Please "
+                "dspy.configure(...) can only be called from the same async task that called it first. Please "
                 "use `dspy.context(...)` in other async tasks instead."
             )
diff --git a/dspy/predict/best_of_n.py b/dspy/predict/best_of_n.py
index 77013ec075..789ea0ed86 100644
--- a/dspy/predict/best_of_n.py
+++ b/dspy/predict/best_of_n.py
@@ -29,7 +29,7 @@ def __init__(
             ```python
             import dspy
 
-            dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+            dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
             # Define a QA module with chain of thought
             qa = dspy.ChainOfThought("question -> answer")
diff --git a/dspy/predict/refine.py b/dspy/predict/refine.py
index 8c1d756d2f..320365cb91 100644
--- a/dspy/predict/refine.py
+++ b/dspy/predict/refine.py
@@ -67,7 +67,7 @@ def __init__(
             ```python
             import dspy
 
-            dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+            dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
             # Define a QA module with chain of thought
             qa = dspy.ChainOfThought("question -> answer")
diff --git a/dspy/propose/dataset_summary_generator.py b/dspy/propose/dataset_summary_generator.py
index fd9284999a..50bcff27c7 100644
--- a/dspy/propose/dataset_summary_generator.py
+++ b/dspy/propose/dataset_summary_generator.py
@@ -50,7 +50,7 @@ def create_dataset_summary(trainset, view_data_batch_size, prompt_model, log_fil
     print("\nBootstrapping dataset summary (this will be used to generate instructions)...")
     upper_lim = min(len(trainset), view_data_batch_size)
     prompt_model = prompt_model if prompt_model else dspy.settings.lm
-    with dspy.settings.context(lm=prompt_model):
+    with dspy.context(lm=prompt_model):
         observation = dspy.Predict(DatasetDescriptor, n=1, temperature=1.0)(examples=order_input_keys_in_string(trainset[0:upper_lim].__repr__()))
 
     observations = observation["observations"]
@@ -68,7 +68,7 @@ def create_dataset_summary(trainset, view_data_batch_size, prompt_model, log_fil
         if verbose:
             print(f"b: {b}")
         upper_lim = min(len(trainset), b+view_data_batch_size)
-        with dspy.settings.context(lm=prompt_model):
+        with dspy.context(lm=prompt_model):
             output = dspy.Predict(DatasetDescriptorWithPriorObservations, n=1, temperature=1.0)(prior_observations=observations, examples=order_input_keys_in_string(trainset[b:upper_lim].__repr__()))
         if len(output["observations"]) >= 8 and output["observations"][:8].upper() == "COMPLETE":
             skips += 1
@@ -84,7 +84,7 @@ def create_dataset_summary(trainset, view_data_batch_size, prompt_model, log_fil
             print(f"e {e}. using observations from past round for a summary.")
 
     if prompt_model:
-        with dspy.settings.context(lm=prompt_model):
+        with dspy.context(lm=prompt_model):
             summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations)
     else:
         summary = dspy.Predict(ObservationSummarizer, n=1, temperature=1.0)(observations=observations)
diff --git a/dspy/propose/grounded_proposer.py b/dspy/propose/grounded_proposer.py
index 5f9b759e66..ead30b5281 100644
--- a/dspy/propose/grounded_proposer.py
+++ b/dspy/propose/grounded_proposer.py
@@ -419,7 +419,7 @@ def propose_instruction_for_predictor(
             temperature=self.init_temperature,
         )
 
-        with dspy.settings.context(lm=rollout_lm):
+        with dspy.context(lm=rollout_lm):
             proposed_instruction = instruction_generator(
                 demo_candidates=demo_candidates,
                 pred_i=pred_i,
diff --git a/dspy/retrievers/weaviate_rm.py b/dspy/retrievers/weaviate_rm.py
index 381919254a..c5af333f47 100644
--- a/dspy/retrievers/weaviate_rm.py
+++ b/dspy/retrievers/weaviate_rm.py
@@ -34,7 +34,7 @@ class WeaviateRM(dspy.Retrieve):
         llm = dspy.Cohere(model="command-r-plus", api_key=api_key)
         weaviate_client = weaviate.connect_to_[local, wcs, custom, embedded]("your-path-here")
         retriever_model = WeaviateRM("my_collection_name", weaviate_client=weaviate_client)
-        dspy.settings.configure(lm=llm, rm=retriever_model)
+        dspy.configure(lm=llm, rm=retriever_model)
 
         retrieve = dspy.Retrieve(k=1)
         topK_passages = retrieve("what are the stages in planning, sanctioning and execution of public works").passages
diff --git a/dspy/streaming/streamify.py b/dspy/streaming/streamify.py
index d8ec56eb2f..9cfec34d40 100644
--- a/dspy/streaming/streamify.py
+++ b/dspy/streaming/streamify.py
@@ -64,7 +64,7 @@ def streamify(
         import asyncio
         import dspy
 
-        dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+        dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
         # Create the program and wrap it with streaming functionality
         program = dspy.streamify(dspy.Predict("q->a"))
@@ -88,7 +88,7 @@ async def use_streaming():
         import asyncio
         import dspy
 
-        dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+        dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
         class MyStatusMessageProvider(StatusMessageProvider):
             def module_start_status_message(self, instance, inputs):
@@ -121,7 +121,7 @@ async def use_streaming():
         import asyncio
         import dspy
 
-        dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False))
+        dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False))
 
         # Create the program and wrap it with streaming functionality
         predict = dspy.Predict("question->answer, reasoning")
diff --git a/dspy/teleprompt/bootstrap.py b/dspy/teleprompt/bootstrap.py
index 31a5736239..e7e6b8f434 100644
--- a/dspy/teleprompt/bootstrap.py
+++ b/dspy/teleprompt/bootstrap.py
@@ -182,13 +182,13 @@ def _bootstrap_one_example(self, example, round_idx=0):
         predictor_cache = {}
 
         try:
-            with dspy.settings.context(trace=[], **self.teacher_settings):
+            with dspy.context(trace=[], **self.teacher_settings):
                 lm = dspy.settings.lm
                 # Use a fresh rollout with temperature=1.0 to bypass caches.
                 lm = lm.copy(rollout_id=round_idx, temperature=1.0) if round_idx > 0 else lm
                 new_settings = {"lm": lm} if round_idx > 0 else {}
 
-                with dspy.settings.context(**new_settings):
+                with dspy.context(**new_settings):
                     for name, predictor in teacher.named_predictors():
                         predictor_cache[name] = predictor.demos
                         predictor.demos = [x for x in predictor.demos if x != example]
diff --git a/dspy/teleprompt/copro_optimizer.py b/dspy/teleprompt/copro_optimizer.py
index e0f4f71749..dd3413fedb 100644
--- a/dspy/teleprompt/copro_optimizer.py
+++ b/dspy/teleprompt/copro_optimizer.py
@@ -20,7 +20,7 @@
 
 Note that this teleprompter takes in the following parameters:
 
-* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. dspy.settings.configure(lm=task_model)).
+* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (i.e., dspy.configure(lm=task_model)).
 * metric: The task metric used for optimization.
 * breadth: The number of new prompts to generate at each iteration. Default=10.
 * depth: The number of times we should ask our prompt model to generate new prompts, with the history of the past prompts as input. Default=3.
@@ -156,7 +156,7 @@ def compile(self, student, *, trainset, eval_kwargs):
                 basic_instruction = self._get_signature(predictor).instructions
                 basic_prefix = self._get_signature(predictor).fields[last_key].json_schema_extra["prefix"]
                 if self.prompt_model:
-                    with dspy.settings.context(lm=self.prompt_model):
+                    with dspy.context(lm=self.prompt_model):
                         instruct = dspy.Predict(
                             BasicGenerateInstruction,
                             n=self.breadth - 1,
@@ -306,7 +306,7 @@ def compile(self, student, *, trainset, eval_kwargs):
 
                 # Generate next batch of potential prompts to optimize, with previous attempts as input
                 if self.prompt_model:
-                    with dspy.settings.context(lm=self.prompt_model):
+                    with dspy.context(lm=self.prompt_model):
                         instr = dspy.Predict(
                             GenerateInstructionGivenAttempts,
                             n=self.breadth,
diff --git a/dspy/teleprompt/infer_rules.py b/dspy/teleprompt/infer_rules.py
index 13e813fe76..2dcb240665 100644
--- a/dspy/teleprompt/infer_rules.py
+++ b/dspy/teleprompt/infer_rules.py
@@ -142,12 +142,12 @@ class CustomRulesInduction(dspy.Signature):
         self.rng = random.Random(0)
 
     def forward(self, examples_text):
-        with dspy.settings.context(**self.teacher_settings):
+        with dspy.context(**self.teacher_settings):
             # Generate rules with a fresh rollout and non-zero temperature.
             lm = dspy.settings.lm.copy(
                 rollout_id=self.rng.randint(0, 10**9), temperature=1.0
             )
-            with dspy.settings.context(lm=lm):
+            with dspy.context(lm=lm):
                 rules = self.rules_induction(examples_text=examples_text).natural_language_rules
         return rules.strip()
diff --git a/dspy/teleprompt/signature_opt.py b/dspy/teleprompt/signature_opt.py
index e5c364daae..8fc2d293fc 100644
--- a/dspy/teleprompt/signature_opt.py
+++ b/dspy/teleprompt/signature_opt.py
@@ -17,7 +17,7 @@
 
 Note that this teleprompter takes in the following parameters:
 
-* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (ie. dspy.settings.configure(lm=task_model)).
+* prompt_model: The model used for prompt generation. When unspecified, defaults to the model set in settings (i.e., dspy.configure(lm=task_model)).
 * metric: The task metric used for optimization.
 * breadth: The number of new prompts to generate at each iteration. Default=10.
 * depth: The number of times we should ask our prompt model to generate new prompts, with the history of the past prompts as input. Default=3.
diff --git a/dspy/teleprompt/simba_utils.py b/dspy/teleprompt/simba_utils.py
index fd5c3e8808..a47c00c821 100644
--- a/dspy/teleprompt/simba_utils.py
+++ b/dspy/teleprompt/simba_utils.py
@@ -155,7 +155,7 @@ def append_a_rule(bucket, system, **kwargs):
     kwargs = {k: v if isinstance(v, str) else orjson.dumps(recursive_mask(v), option=orjson.OPT_INDENT_2).decode()
               for k, v in kwargs.items()}
 
-    with dspy.settings.context(trace=[], lm=prompt_model):
+    with dspy.context(trace=[], lm=prompt_model):
         advice_program = dspy.Predict(OfferFeedback)
         advice = advice_program(**kwargs).module_advice
diff --git a/dspy/utils/callback.py b/dspy/utils/callback.py
index 20be402e86..eaf2fd9e6e 100644
--- a/dspy/utils/callback.py
+++ b/dspy/utils/callback.py
@@ -18,11 +18,11 @@ class BaseCallback:
     To use a callback, subclass this class and implement the desired handlers. Each handler
     will be called at the appropriate time before/after the execution of the corresponding component. For example, if
     you want to print a message before and after an LM is called, implement the `on_lm_start` and `on_lm_end` handlers.
 
-    Users can set the callback globally using `dspy.settings.configure` or locally by passing it to the component
+    Users can set the callback globally using `dspy.configure` or locally by passing it to the component
     constructor.
 
-    Example 1: Set a global callback using `dspy.settings.configure`.
+    Example 1: Set a global callback using `dspy.configure`.
 
     ```
     import dspy
@@ -36,7 +36,7 @@ def on_lm_start(self, call_id, instance, inputs):
         def on_lm_end(self, call_id, outputs, exception):
             print(f"LM is finished with outputs: {outputs}")
 
-    dspy.settings.configure(
+    dspy.configure(
         callbacks=[LoggingCallback()]
     )
diff --git a/dspy/utils/dummies.py b/dspy/utils/dummies.py
index a79f6b6c48..fb96f2c8c1 100644
--- a/dspy/utils/dummies.py
+++ b/dspy/utils/dummies.py
@@ -25,7 +25,7 @@ class DummyLM(LM):
         ```
         lm = DummyLM([{"answer": "red"}, {"answer": "blue"}])
-        dspy.settings.configure(lm=lm)
+        dspy.configure(lm=lm)
         predictor("What color is the sky?")
         # Output:
         # [[## answer ##]]
@@ -44,7 +44,7 @@ class DummyLM(LM):
         ```
         lm = DummyLM({"What color is the sky?": {"answer": "blue"}})
-        dspy.settings.configure(lm=lm)
+        dspy.configure(lm=lm)
         predictor("What color is the sky?")
         # Output:
         # [[## answer ##]]
@@ -58,7 +58,7 @@ class DummyLM(LM):
         ```
         lm = DummyLM([{"answer": "red"}], follow_examples=True)
-        dspy.settings.configure(lm=lm)
+        dspy.configure(lm=lm)
         predictor("What color is the sky?", demos=dspy.Example(input="What color is the sky?", output="blue"))
         # Output:
         # [[## answer ##]]
diff --git a/tests/callback/test_callback.py b/tests/callback/test_callback.py
index 365266ca9d..d0c73a16de 100644
--- a/tests/callback/test_callback.py
+++ b/tests/callback/test_callback.py
@@ -14,7 +14,7 @@ def reset_settings():
 
     yield
 
-    dspy.settings.configure(**original_settings)
+    dspy.configure(**original_settings)
 
 
 class MyCallback(BaseCallback):
@@ -71,7 +71,7 @@ def forward(self, x: int, y: str, z: float) -> int:
         return x + int(y) + int(z)
 
     callback = MyCallback()
-    dspy.settings.configure(callbacks=[callback])
+    dspy.configure(callbacks=[callback])
 
     target = Target()
     result = target.forward(*args, **kwargs)
@@ -122,7 +122,7 @@ def forward(self, x: int, y: str, z: float) -> int:
             raise ValueError("Error")
 
     callback = MyCallback()
-    dspy.settings.configure(callbacks=[callback])
+    dspy.configure(callbacks=[callback])
 
     target = Target()
 
@@ -144,7 +144,7 @@ def forward(self, x: int, y: str, z: float) -> int:
 
     callback_1 = MyCallback()
     callback_2 = MyCallback()
-    dspy.settings.configure(callbacks=[callback_1, callback_2])
+    dspy.configure(callbacks=[callback_1, callback_2])
 
     target = Target()
     result = target.forward(1, "2", 3.0)
@@ -157,7 +157,7 @@ def forward(self, x: int, y: str, z: float) -> int:
 
 def test_callback_complex_module():
     callback = MyCallback()
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM({"How are you?": {"answer": "test output", "reasoning": "No more responses"}}),
         callbacks=[callback],
     )
@@ -220,7 +220,7 @@ async def test_callback_async_module():
 
 def test_tool_calls():
     callback = MyCallback()
-    dspy.settings.configure(callbacks=[callback])
+    dspy.configure(callbacks=[callback])
 
     def tool_1(query: str) -> str:
         """A dummy tool function."""
@@ -279,7 +279,7 @@ def forward(self):
             pass
 
     callback = CustomCallback()
-    dspy.settings.configure(callbacks=[callback])
+    dspy.configure(callbacks=[callback])
 
     parent = Parent()
     parent()
diff --git a/tests/clients/test_inspect_global_history.py b/tests/clients/test_inspect_global_history.py
index 2deb0c2e97..cd9a42b703 100644
--- a/tests/clients/test_inspect_global_history.py
+++ b/tests/clients/test_inspect_global_history.py
@@ -14,7 +14,7 @@ def clear_history():
 def test_inspect_history_basic(capsys):
     # Configure a DummyLM with some predefined responses
     lm = DummyLM([{"response": "Hello"}, {"response": "How are you?"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     # Make some calls to generate history
     predictor = dspy.Predict("query: str -> response: str")
@@ -35,7 +35,7 @@ def test_inspect_history_with_n(capsys):
     Random failures in this test most likely mean you are printing messages somewhere
     """
     lm = DummyLM([{"response": "One"}, {"response": "Two"}, {"response": "Three"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     # Generate some history
     predictor = dspy.Predict("query: str -> response: str")
@@ -54,7 +54,7 @@ def test_inspect_history_with_n(capsys):
 def test_inspect_empty_history(capsys):
     # Configure fresh DummyLM
     lm = DummyLM([])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     # Test inspecting empty history
     dspy.inspect_history()
@@ -65,7 +65,7 @@ def test_inspect_empty_history(capsys):
 
 def test_inspect_history_n_larger_than_history(capsys):
     lm = DummyLM([{"response": "First"}, {"response": "Second"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     predictor = dspy.Predict("query: str -> response: str")
     predictor(query="Query 1")
diff --git a/tests/conftest.py b/tests/conftest.py
index 372585df20..b634c8ac6e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -17,7 +17,7 @@ def clear_settings():
     import dspy
     from dspy.dsp.utils.settings import DEFAULT_CONFIG
 
-    dspy.settings.configure(**copy.deepcopy(DEFAULT_CONFIG), inherit_config=False)
+    dspy.configure(**copy.deepcopy(DEFAULT_CONFIG), inherit_config=False)
 
 
 @pytest.fixture
diff --git a/tests/evaluate/test_evaluate.py b/tests/evaluate/test_evaluate.py
index 211cf25962..2b336e3c21 100644
--- a/tests/evaluate/test_evaluate.py
+++ b/tests/evaluate/test_evaluate.py
@@ -34,7 +34,7 @@ def test_evaluate_initialization():
 
 
 def test_evaluate_call():
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             {
                 "What is 1+1?": {"answer": "2"},
@@ -86,7 +86,7 @@ def test_construct_result_df():
 
 
 def test_multithread_evaluate_call():
-    dspy.settings.configure(lm=DummyLM({"What is 1+1?": {"answer": "2"}, "What is 2+2?": {"answer": "4"}}))
+    dspy.configure(lm=DummyLM({"What is 1+1?": {"answer": "2"}, "What is 2+2?": {"answer": "4"}}))
     devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")]
     program = Predict("question -> answer")
     assert program(question="What is 1+1?").answer == "2"
@@ -109,7 +109,7 @@ def __call__(self, *args, **kwargs):
             time.sleep(1)
             return super().__call__(*args, **kwargs)
 
-    dspy.settings.configure(lm=SlowLM({"What is 1+1?": {"answer": "2"}, "What is 2+2?": {"answer": "4"}}))
+    dspy.configure(lm=SlowLM({"What is 1+1?": {"answer": "2"}, "What is 2+2?": {"answer": "4"}}))
 
     devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")]
     program = Predict("question -> answer")
@@ -138,7 +138,7 @@ def sleep_then_interrupt():
 
 
 def test_evaluate_call_wrong_answer():
-    dspy.settings.configure(lm=DummyLM({"What is 1+1?": {"answer": "0"}, "What is 2+2?": {"answer": "0"}}))
+    dspy.configure(lm=DummyLM({"What is 1+1?": {"answer": "0"}, "What is 2+2?": {"answer": "0"}}))
     devset = [new_example("What is 1+1?", "2"), new_example("What is 2+2?", "4")]
     program = Predict("question -> answer")
     ev = Evaluate(
@@ -180,7 +180,7 @@ def test_evaluate_display_table(program_with_example, display_table, is_in_ipyth
     example_input = next(iter(example.inputs().values()))
     example_output = {key: value for key, value in example.toDict().items() if key not in example.inputs()}
 
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             {
                 example_input: example_output,
@@ -234,7 +234,7 @@ def on_evaluate_end(
             self.end_call_count += 1
 
     callback = TestCallback()
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             {
                 "What is 1+1?": {"answer": "2"},
diff --git a/tests/examples/test_baleen.py b/tests/examples/test_baleen.py
index 1cc27e8fcb..eed29eabfd 100644
--- a/tests/examples/test_baleen.py
+++ b/tests/examples/test_baleen.py
@@ -57,7 +57,7 @@ def load_hotpotqa():
 def _test_baleen():
     lm = dspy.OpenAI(model="gpt-3.5-turbo")
     rm = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")
-    dspy.settings.configure(lm=lm, rm=rm)
+    dspy.configure(lm=lm, rm=rm)
 
     # Ask any question you like to this simple RAG program.
     my_question = "How many storeys are in the castle that David Gregory inherited?"
@@ -98,7 +98,7 @@ def _test_compiled_baleen():
     trainset, devset = load_hotpotqa()
     lm = dspy.OpenAI(model="gpt-3.5-turbo")
     rm = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")
-    dspy.settings.configure(lm=lm, rm=rm)
+    dspy.configure(lm=lm, rm=rm)
 
     uncompiled_baleen = SimplifiedBaleen()  # uncompiled (i.e., zero-shot) program
diff --git a/tests/predict/test_best_of_n.py b/tests/predict/test_best_of_n.py
index 777e99e547..684bc0629d 100644
--- a/tests/predict/test_best_of_n.py
+++ b/tests/predict/test_best_of_n.py
@@ -19,7 +19,7 @@ def forward(self, **kwargs) -> Prediction:
 
 def test_refine_forward_success_first_attempt():
     lm = DummyLM([{"answer": "Brussels"}, {"answer": "City of Brussels"}, {"answer": "Brussels"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     module_call_count = [0]
 
     def count_calls(self, **kwargs):
@@ -47,7 +47,7 @@ def reward_fn(kwargs, pred: Prediction) -> float:
 
 def test_refine_module_default_fail_count():
     lm = DummyLM([{"answer": "Brussels"}, {"answer": "City of Brussels"}, {"answer": "Brussels"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     def always_raise(self, **kwargs):
         raise ValueError("Deliberately failing")
@@ -61,7 +61,7 @@ def always_raise(self, **kwargs):
 
 def test_refine_module_custom_fail_count():
     lm = DummyLM([{"answer": "Brussels"}, {"answer": "City of Brussels"}, {"answer": "Brussels"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     module_call_count = [0]
 
     def raise_on_second_call(self, **kwargs):
diff --git a/tests/predict/test_chain_of_thought.py b/tests/predict/test_chain_of_thought.py
index 247256dba7..686bd38668 100644
--- a/tests/predict/test_chain_of_thought.py
+++ b/tests/predict/test_chain_of_thought.py
@@ -7,7 +7,7 @@
 
 def test_initialization_with_string_signature():
     lm = DummyLM([{"reasoning": "find the number after 1", "answer": "2"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     predict = ChainOfThought("question -> answer")
     assert list(predict.predict.signature.output_fields.keys()) == [
         "reasoning",
diff --git a/tests/predict/test_code_act.py b/tests/predict/test_code_act.py
index 39f958af09..424656f8e0 100644
--- a/tests/predict/test_code_act.py
+++ b/tests/predict/test_code_act.py
@@ -34,7 +34,7 @@ def test_codeact_code_generation():
             {"reasoning": "Reason_B", "answer": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     program = CodeAct(BasicQA, tools=[add])
     res = program(question="What is 1+1?")
     assert res.answer == "2"
@@ -66,7 +66,7 @@ def test_codeact_support_multiple_fields():
            {"reasoning": "Reason_B", "maximum": "6", "minimum": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     program = CodeAct(ExtremumFinder, tools=[extract_maximum_minimum])
     res = program(input_list="2, 3, 5, 6")
     assert res.maximum == "6"
@@ -95,7 +95,7 @@ def test_codeact_code_parse_failure():
             {"reasoning": "Reason_B", "answer": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     program = CodeAct(BasicQA, tools=[add])
     res = program(question="What is 1+1?")
     assert res.answer == "2"
@@ -125,7 +125,7 @@ def test_codeact_code_execution_failure():
             {"reasoning": "Reason_B", "answer": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     program = CodeAct(BasicQA, tools=[add])
     res = program(question="What is 1+1?")
     assert res.answer == "2"
diff --git a/tests/predict/test_multi_chain_comparison.py b/tests/predict/test_multi_chain_comparison.py
index 39f7c18cc7..294e28db3d 100644
--- a/tests/predict/test_multi_chain_comparison.py
+++ b/tests/predict/test_multi_chain_comparison.py
@@ -33,7 +33,7 @@ def test_basic_example():
     # Call the MultiChainComparison on the completions
     question = "What is the color of the sky?"
     lm = DummyLM([{"rationale": "my rationale", "answer": "blue"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     final_pred = compare_answers(completions, question=question)
 
     assert final_pred.rationale == "my rationale"
diff --git a/tests/predict/test_parallel.py b/tests/predict/test_parallel.py
index fe8479079b..f8524d4d25 100644
--- a/tests/predict/test_parallel.py
+++ b/tests/predict/test_parallel.py
@@ -12,7 +12,7 @@ def test_parallel_module():
             {"output": "test output 5"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     class MyModule(dspy.Module):
         def __init__(self):
@@ -99,7 +99,7 @@ def test_nested_parallel_module():
             {"output": "test output 5"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     class MyModule(dspy.Module):
         def __init__(self):
@@ -143,7 +143,7 @@ def test_nested_batch_method():
             {"output": "test output 5"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     class MyModule(dspy.Module):
         def __init__(self):
diff --git a/tests/predict/test_predict.py b/tests/predict/test_predict.py
index e5a208b070..23b6e1d785 100644
--- a/tests/predict/test_predict.py
+++ b/tests/predict/test_predict.py
@@ -70,7 +70,7 @@ def test_lm_after_dump_and_load_state():
 def test_call_method():
     predict_instance = Predict("input -> output")
     lm = DummyLM([{"output": "test output"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     result = predict_instance(input="test input")
     assert result.output == "test output"
@@ -260,14 +260,14 @@ def test_lm_field_after_dump_and_load_state(tmp_path, filename):
 
 def test_forward_method():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=DummyLM([{"answer": "No more responses"}]))
+    dspy.configure(lm=DummyLM([{"answer": "No more responses"}]))
     result = program(question="What is 1+1?").answer
     assert result == "No more responses"
 
 
 def test_forward_method2():
     program = Predict("question -> answer1, answer2")
-    dspy.settings.configure(lm=DummyLM([{"answer1": "my first answer", "answer2": "my second answer"}]))
+    dspy.configure(lm=DummyLM([{"answer1": "my first answer", "answer2": "my second answer"}]))
     result = program(question="What is 1+1?")
     assert result.answer1 == "my first answer"
     assert result.answer2 == "my second answer"
@@ -282,7 +282,7 @@ def test_config_management():
 
 def test_multi_output():
     program = Predict("question -> answer", n=2)
-    dspy.settings.configure(lm=DummyLM([{"answer": "my first answer"}, {"answer": "my second answer"}]))
+    dspy.configure(lm=DummyLM([{"answer": "my first answer"}, {"answer": "my second answer"}]))
     results = program(question="What is 1+1?")
     assert results.completions.answer[0] == "my first answer"
     assert results.completions.answer[1] == "my second answer"
@@ -290,7 +290,7 @@ def test_multi_output2():
 
 def test_multi_output2():
     program = Predict("question -> answer1, answer2", n=2)
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             [
                 {"answer1": "my 0 answer", "answer2": "my 2 answer"},
@@ -327,7 +327,7 @@ class TimedSignature(dspy.Signature):
             }
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     output = program(
         events=[
@@ -359,7 +359,7 @@ class StatusSignature(dspy.Signature):
            }
        ]
    )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     output = program(current_status=Status.PENDING)
     assert output.next_status == Status.IN_PROGRESS
@@ -386,7 +386,7 @@ class TicketStatusSignature(dspy.Signature):
            }
        ]
    )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     output = program(current_status=TicketStatus.OPEN)
     assert output.next_status == TicketStatus.CLOSED  # By value
@@ -409,7 +409,7 @@ class StatusSignature(dspy.Signature):
            }
        ]
    )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     output = program(current_status=Status.PENDING)
     assert output.next_status == Status.IN_PROGRESS
@@ -436,7 +436,7 @@ class OutputOnlySignature(dspy.Signature):
 
     predictor = Predict(OutputOnlySignature)
 
     lm = DummyLM([{"output": "short answer"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     assert predictor().output == "short answer"
@@ -474,10 +474,10 @@ class MySignature(dspy.Signature):
 
     if adapter_type == "chat":
         lm = SpyLM("dummy_model")
-        dspy.settings.configure(adapter=dspy.ChatAdapter(), lm=lm)
+        dspy.configure(adapter=dspy.ChatAdapter(), lm=lm)
     else:
         lm = SpyLM("dummy_model", return_json=True)
-        dspy.settings.configure(adapter=dspy.JSONAdapter(), lm=lm)
+        dspy.configure(adapter=dspy.JSONAdapter(), lm=lm)
 
     program(
         question="are you sure that's correct?",
@@ -497,7 +497,7 @@ class MySignature(dspy.Signature):
 
 def test_lm_usage():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), track_usage=True)
+    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), track_usage=True)
     with patch(
         "dspy.clients.lm.litellm_completion",
         return_value=ModelResponse(
@@ -518,7 +518,7 @@ def program_wrapper(question):
         time.sleep(0.5)
         return program(question=question)
 
-    dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), track_usage=True)
+    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), track_usage=True)
     with patch(
         "dspy.clients.lm.litellm_completion",
         return_value=ModelResponse(
@@ -634,10 +634,10 @@ class ConstrainedSignature(dspy.Signature):
     lm = SpyLM("dummy_model")
     if adapter_type == "chat":
         lm = SpyLM("dummy_model")
-        dspy.settings.configure(adapter=dspy.ChatAdapter(), lm=lm)
+        dspy.configure(adapter=dspy.ChatAdapter(), lm=lm)
     else:
         lm = SpyLM("dummy_model", return_json=True)
-        dspy.settings.configure(adapter=dspy.JSONAdapter(), lm=lm)
+        dspy.configure(adapter=dspy.JSONAdapter(), lm=lm)
 
     # Call the predictor to trigger instruction generation
     program(text="hello world", number=5)
@@ -665,7 +665,7 @@ async def test_async_predict():
 
 def test_predicted_outputs_piped_from_predict_to_lm_call():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini"))
+    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
 
     with patch("litellm.completion") as mock_completion:
         program(
@@ -729,7 +729,7 @@ class TestSignature(dspy.Signature):
 
 def test_trace_size_limit():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=DummyLM([{"answer": "Paris"}]), max_trace_size=3)
+    dspy.configure(lm=DummyLM([{"answer": "Paris"}]), max_trace_size=3)
 
     for _ in range(10):
         program(question="What is the capital of France?")
@@ -739,7 +739,7 @@ def test_disable_trace():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=DummyLM([{"answer": "Paris"}]), trace=None)
+    dspy.configure(lm=DummyLM([{"answer": "Paris"}]), trace=None)
 
     for _ in range(10):
         program(question="What is the capital of France?")
@@ -749,7 +749,7 @@ def test_per_module_history_size_limit():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=DummyLM([{"answer": "Paris"}]), max_history_size=5)
+    dspy.configure(lm=DummyLM([{"answer": "Paris"}]), max_history_size=5)
 
     for _ in range(10):
         program(question="What is the capital of France?")
@@ -758,7 +758,7 @@ def test_per_module_history_disabled():
     program = Predict("question -> answer")
-    dspy.settings.configure(lm=DummyLM([{"answer": "Paris"}]), disable_history=True)
+    dspy.configure(lm=DummyLM([{"answer": "Paris"}]), disable_history=True)
 
     for _ in range(10):
         program(question="What is the capital of France?")
diff --git a/tests/predict/test_program_of_thought.py b/tests/predict/test_program_of_thought.py
index 4871afc7f6..3b197b6113 100644
--- a/tests/predict/test_program_of_thought.py
+++ b/tests/predict/test_program_of_thought.py
@@ -27,7 +27,7 @@ def test_pot_code_generation():
             {"reasoning": "Reason_B", "answer": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     pot = ProgramOfThought(BasicQA)
     res = pot(question="What is 1+1?")
     assert res.answer == "2"
@@ -43,7 +43,7 @@ def test_old_style_pot():
             {"reasoning": "Reason_B", "answer": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     pot = ProgramOfThought(BasicQA)
     res = pot(question="What is 1+1?")
     assert res.answer == "2"
@@ -67,7 +67,7 @@ def test_pot_support_multiple_fields():
             {"reasoning": "Reason_B", "maximum": "6", "minimum": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     pot = ProgramOfThought(ExtremumFinder)
     res = pot(input_list="2, 3, 5, 6")
     assert res.maximum == "6"
@@ -90,7 +90,7 @@ def test_pot_code_generation_with_one_error():
             {"reasoning": "Reason_C", "answer": "2"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     pot = ProgramOfThought(BasicQA)
     res = pot(question="What is 1+1?")
     assert res.answer == "2"
@@ -109,7 +109,7 @@ def test_pot_code_generation_persistent_errors():
        ]
        * max_iters
    )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     pot = ProgramOfThought(BasicQA, max_iters=max_iters)
     with pytest.raises(RuntimeError, match="Max hops reached. Failed to run ProgramOfThought: ZeroDivisionError:"):
@@ -125,7 +125,7 @@ def test_pot_code_parse_error():
        ]
        * max_iters
    )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     pot = ProgramOfThought(BasicQA, max_iters=max_iters)
     with (
         patch("dspy.predict.program_of_thought.ProgramOfThought._execute_code") as mock_execute_code,
diff --git a/tests/predict/test_react.py b/tests/predict/test_react.py
index 6c12859c0d..09fd1c7c85 100644
--- a/tests/predict/test_react.py
+++ b/tests/predict/test_react.py
@@ -41,7 +41,7 @@ def make_images():
         ],
         adapter=adapter,
     )
-    dspy.settings.configure(lm=lm, adapter=adapter)
+    dspy.configure(lm=lm, adapter=adapter)
 
     react = dspy.ReAct("question -> answer", tools=[make_images])
     react(question="Draw me something red")
@@ -99,7 +99,7 @@ class InvitationSignature(dspy.Signature):
             },
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     outputs = react(
         participant_name="Alice",
@@ -144,7 +144,7 @@ def foo(a, b):
             {"reasoning": "I added the numbers successfully", "c": 3},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     outputs = react(a=1, b=2)
     expected_trajectory = {
@@ -227,7 +227,7 @@ def foo(a, b):
             {"reasoning": "I added the numbers successfully", "c": 3},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     outputs = react(a=1, b=2, max_iters=2)
     traj = outputs.trajectory
diff --git a/tests/predict/test_refine.py b/tests/predict/test_refine.py
index 7bff4b2b09..7a5c2ece13 100644
--- a/tests/predict/test_refine.py
+++ b/tests/predict/test_refine.py
@@ -19,7 +19,7 @@ def forward(self, **kwargs) -> Prediction:
 
 def test_refine_forward_success_first_attempt():
     lm = DummyLM([{"answer": "Brussels"}, {"answer": "City of Brussels"}, {"answer": "Brussels"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     module_call_count = [0]
 
     def count_calls(self, **kwargs):
@@ -47,7 +47,7 @@ def reward_fn(kwargs, pred: Prediction) -> float:
 
 def test_refine_module_default_fail_count():
     lm = DummyLM([{"answer": "Brussels"}, {"answer": "City of Brussels"}, {"answer": "Brussels"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     def always_raise(self, **kwargs):
         raise ValueError("Deliberately failing")
@@ -61,7 +61,7 @@ def always_raise(self, **kwargs):
 
 def test_refine_module_custom_fail_count():
     lm = DummyLM([{"answer": "Brussels"}, {"answer": "City of Brussels"}, {"answer": "Brussels"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     module_call_count = [0]
 
     def raise_on_second_call(self, **kwargs):
diff --git a/tests/predict/test_retry.py b/tests/predict/test_retry.py
index eb1c48b06e..45113617a2 100644
--- a/tests/predict/test_retry.py
+++ b/tests/predict/test_retry.py
@@ -17,7 +17,7 @@
 #     assert "feedback" in retry_module.new_signature.input_fields
 
 #     lm = DummyLM([{"answer": "blue"}])
-#     dspy.settings.configure(lm=lm)
+#     dspy.configure(lm=lm)
 #     result = retry_module.forward(
 #         question="What color is the sky?",
 #         past_outputs={"answer": "red"},
@@ -29,7 +29,7 @@
 # def test_retry_forward_with_feedback():
 #     # First we make a mistake, then we fix it
 #     lm = DummyLM([{"answer": "red"}, {"answer": "blue"}])
-#     dspy.settings.configure(lm=lm, trace=[])
+#     dspy.configure(lm=lm, trace=[])

#     class SimpleModule(dspy.Module):
#         def __init__(self):
@@ -56,7 +56,7 @@
# # def test_retry_forward_with_typed_predictor():
# #     # First we make a mistake, then we fix it
# #     lm = DummyLM([{"output": '{"answer":"red"}'}, {"output": '{"answer":"blue"}'}])
-# #     dspy.settings.configure(lm=lm, trace=[])
+# #     dspy.configure(lm=lm, trace=[])

# #     class AnswerQuestion(dspy.Signature):
# #         """Answer questions with succinct responses."""
diff --git a/tests/primitives/test_base_module.py b/tests/primitives/test_base_module.py
index f92315a90d..a8960cfda9 100644
--- a/tests/primitives/test_base_module.py
+++ b/tests/primitives/test_base_module.py
@@ -105,7 +105,7 @@ class MySignature(dspy.Signature):
     ]
     trainset = [dspy.Example(**example).with_inputs("current_date", "target_date") for example in trainset]
 
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM([{"date_diff": "1", "reasoning": "n/a"}, {"date_diff": "2", "reasoning": "n/a"}] * 10)
     )
@@ -247,7 +247,7 @@ def emit(self, record):
 
 @pytest.mark.llm_call
 def test_single_module_call_with_usage_tracker(lm_for_test):
-    dspy.settings.configure(lm=dspy.LM(lm_for_test, cache=False), track_usage=True)
+    dspy.configure(lm=dspy.LM(lm_for_test, cache=False), track_usage=True)
 
     predict = dspy.ChainOfThought("question -> answer")
     output = predict(question="What is the capital of France?")
@@ -259,7 +259,7 @@ def test_single_module_call_with_usage_tracker(lm_for_test):
     assert lm_usage[lm_for_test]["total_tokens"] > 0
 
     # Test no usage being tracked when cache is enabled
-    dspy.settings.configure(lm=dspy.LM(lm_for_test, cache=True), track_usage=True)
+    dspy.configure(lm=dspy.LM(lm_for_test, cache=True), track_usage=True)
     for _ in range(2):
         output = predict(question="What is the capital of France?")
@@ -268,7 +268,7 @@ def test_single_module_call_with_usage_tracker(lm_for_test):
 
 @pytest.mark.llm_call
 def test_multi_module_call_with_usage_tracker(lm_for_test):
-    dspy.settings.configure(lm=dspy.LM(lm_for_test, cache=False), track_usage=True)
+    dspy.configure(lm=dspy.LM(lm_for_test, cache=False), track_usage=True)
 
     class MyProgram(dspy.Module):
         def __init__(self):
@@ -301,12 +301,12 @@ def __init__(self, lm):
             self.predict2 = dspy.ChainOfThought("question, answer -> score")
 
         def __call__(self, question: str) -> Prediction:
-            with dspy.settings.context(lm=self.lm):
+            with dspy.context(lm=self.lm):
                 answer = self.predict1(question=question)
                 score = self.predict2(question=question, answer=answer)
                 return score
 
-    dspy.settings.configure(track_usage=True)
+    dspy.configure(track_usage=True)
 
     program1 = MyProgram(lm=dspy.LM("openai/gpt-4o-mini", cache=False))
     program2 = MyProgram(lm=dspy.LM("openai/gpt-3.5-turbo", cache=False))
@@ -356,7 +356,7 @@ async def test_usage_tracker_async_parallel():
         program.acall(question="What is the capital of France?"),
         program.acall(question="What is the capital of France?"),
     ]
-    with dspy.settings.context(
+    with dspy.context(
         lm=dspy.LM("openai/gpt-4o-mini", cache=False), track_usage=True, adapter=dspy.JSONAdapter()
     ):
         results = await asyncio.gather(*coroutines)
@@ -404,7 +404,7 @@ def forward(self, question: str, **kwargs) -> Prediction:
         ],
         model="openai/gpt-4o-mini",
     )
-    dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter())
+    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter())
 
     program = MyProgram()
     program(question="What is the capital of France?")
@@ -421,7 +421,7 @@ def forward(self, question: str, **kwargs) -> Prediction:
 
     assert program.history[0]["outputs"] == ["{'reasoning': 'Paris is the capital of France', 'answer': 'Paris'}"]
 
-    dspy.settings.configure(disable_history=True)
+    dspy.configure(disable_history=True)
 
     program(question="What is the capital of France?")
     # No history is recorded when history is disabled.
@@ -429,7 +429,7 @@ def forward(self, question: str, **kwargs) -> Prediction:
     assert len(program.cot.history) == 2
     assert len(program.cot.predict.history) == 2
 
-    dspy.settings.configure(disable_history=False)
+    dspy.configure(disable_history=False)
 
     program(question="What is the capital of France?")
     # History is recorded again when history is enabled.
@@ -452,7 +452,7 @@ def forward(self, question: str, **kwargs) -> Prediction:
         choices=[Choices(message=Message(content="{'reasoning': 'N/A', 'answer': 'Holy crab!'}"))],
         model="openai/gpt-4o-mini",
     )
-    dspy.settings.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter())
+    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter())
 
     program = MyProgram()
     parallelizer = dspy.Parallel()
diff --git a/tests/primitives/test_module.py b/tests/primitives/test_module.py
index abd3f21a9d..166658673c 100644
--- a/tests/primitives/test_module.py
+++ b/tests/primitives/test_module.py
@@ -38,7 +38,7 @@ def test_predictors():
 
 def test_forward():
     program = HopModule()
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             {
                 "What is 1+1?": {"query": "let me check"},
diff --git a/tests/reliability/utils.py b/tests/reliability/utils.py
index c713072402..3dfe9fba7d 100644
--- a/tests/reliability/utils.py
+++ b/tests/reliability/utils.py
@@ -80,7 +80,7 @@ def judge_dspy_configuration(**extra_judge_config):
     if judge_params is None:
         raise ValueError(f"No LiteLLM configuration found for judge model: {JUDGE_MODEL_NAME}")
 
-    with dspy.settings.context(lm=dspy.LM(**judge_params, **extra_judge_config), adapter=adapter):
+    with dspy.context(lm=dspy.LM(**judge_params, **extra_judge_config), adapter=adapter):
         yield
diff --git a/tests/signatures/test_adapter_image.py b/tests/signatures/test_adapter_image.py
index 07c8154e77..c0fe94371a 100644
--- a/tests/signatures/test_adapter_image.py
+++ b/tests/signatures/test_adapter_image.py
@@ -69,7 +69,7 @@ def count_patterns(obj, pattern):
 def setup_predictor(signature, expected_output):
     """Helper to set up a predictor with DummyLM"""
     lm = DummyLM([expected_output])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     return dspy.Predict(signature), lm
diff --git a/tests/signatures/test_signature.py b/tests/signatures/test_signature.py
index 9e63743b9d..292401bb75 100644
--- a/tests/signatures/test_signature.py
+++ b/tests/signatures/test_signature.py
@@ -189,7 +189,7 @@ class SubSignature(Signature):
 
 def test_multiline_instructions():
     lm = DummyLM([{"output": "short answer"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     class MySignature(Signature):
         """First line
@@ -418,7 +418,7 @@ class CustomType(pydantic.BaseModel):
     assert test_signature.input_fields["input"].annotation == CustomType
 
     lm = DummyLM([{"output": "processed"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     custom_obj = CustomType(value="test")
     pred = dspy.Predict(test_signature)(input=custom_obj)
@@ -432,7 +432,7 @@ def test_custom_type_from_different_module():
     assert test_signature.input_fields["input"].annotation == Path
 
     lm = DummyLM([{"output": "/test/path"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     path_obj = Path("/test/path")
     pred = dspy.Predict(test_signature)(input=path_obj)
@@ -568,7 +568,7 @@ class CustomType(pydantic.BaseModel):
     assert sig.output_fields["output"].annotation == Union[int, str]
 
     lm = DummyLM([{"output": "processed"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     custom_obj = CustomType(value="test")
     pred = dspy.Predict(sig)(input=custom_obj)
diff --git a/tests/teleprompt/test_bootstrap.py b/tests/teleprompt/test_bootstrap.py
index 17b58aedaf..75193eb24a 100644
--- a/tests/teleprompt/test_bootstrap.py
+++ b/tests/teleprompt/test_bootstrap.py
@@ -43,7 +43,7 @@ def test_compile_with_predict_instances():
     teacher = SimpleModule("input -> output")
     lm = DummyLM(["Initial thoughts", "Finish[blue]"])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     # Initialize BootstrapFewShot and compile the student
     bootstrap = BootstrapFewShot(metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1)
@@ -58,7 +58,7 @@ def test_bootstrap_effectiveness():
     student = SimpleModule("input -> output")
     teacher = SimpleModule("input -> output")
     lm = DummyLM([{"output": "blue"}, {"output": "Ring-ding-ding-ding-dingeringeding!"}], follow_examples=True)
-    dspy.settings.configure(lm=lm, trace=[])
+    dspy.configure(lm=lm, trace=[])
 
     bootstrap = BootstrapFewShot(metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1)
     compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset)
@@ -99,7 +99,7 @@ def forward(self, **kwargs):
             {"output": "Initial thoughts"},  # Simulate initial teacher's prediction
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     bootstrap = BootstrapFewShot(
         metric=simple_metric,
@@ -125,7 +125,7 @@ def test_validation_set_usage():
             {"output": "Finish[blue]"},  # Expected output for both training and validation
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     bootstrap = BootstrapFewShot(metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1)
     compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset)
diff --git a/tests/teleprompt/test_bootstrap_finetune.py b/tests/teleprompt/test_bootstrap_finetune.py
index 59c685902f..2eb8614560 100644
--- a/tests/teleprompt/test_bootstrap_finetune.py
+++ b/tests/teleprompt/test_bootstrap_finetune.py
@@ -42,7 +42,7 @@ def test_compile_with_predict_instances():
     teacher = SimpleModule("input -> output")
 
     lm = DummyLM([{"output": "blue"}, {"output": "Ring-ding-ding-ding-dingeringeding!"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     # Set LM for both student and teacher
     student.set_lm(lm)
@@ -65,7 +65,7 @@ def test_error_handling_missing_lm():
     """Test error handling when predictor doesn't have an LM assigned."""
 
     lm = DummyLM([{"output": "test"}])
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     student = SimpleModule("input -> output")
     # Intentionally NOT setting LM for the student module
diff --git a/tests/teleprompt/test_copro_optimizer.py b/tests/teleprompt/test_copro_optimizer.py
index c031d3c7a3..b1dd870e8d 100644
--- a/tests/teleprompt/test_copro_optimizer.py
+++ b/tests/teleprompt/test_copro_optimizer.py
@@ -39,7 +39,7 @@ def forward(self, **kwargs):
 
 def test_signature_optimizer_optimization_process():
     optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4)
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             [
                 {
@@ -69,7 +69,7 @@ def test_signature_optimizer_statistics_tracking():
     optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4)
     optimizer.track_stats = True  # Enable statistics tracking
 
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             [
                 {
@@ -105,7 +105,7 @@ def test_optimization_and_output_verification():
             {"reasoning": "france", "output": "Paris"},
         ]
    )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
     optimizer = COPRO(metric=simple_metric, breadth=2, depth=1, init_temperature=1.4)
 
     student = SimpleModule("input -> output")
@@ -125,7 +125,7 @@ def test_optimization_and_output_verification():
 
 def test_statistics_tracking_during_optimization():
-    dspy.settings.configure(
+    dspy.configure(
         lm=DummyLM(
             [
                 {"proposed_instruction": "Optimized Prompt", "proposed_prefix_for_output_field": "Optimized Prefix"},
diff --git a/tests/teleprompt/test_gepa.py b/tests/teleprompt/test_gepa.py
index da9a74da82..7a379e6ab4 100644
--- a/tests/teleprompt/test_gepa.py
+++ b/tests/teleprompt/test_gepa.py
@@ -96,7 +96,7 @@ def test_basic_workflow(use_mlflow, mock_mlflow):
     reflection_lm_history = data["reflection_lm"]
 
     lm_main = DictDummyLM(lm_history)
-    dspy.settings.configure(lm=lm_main)
+    dspy.configure(lm=lm_main)
     reflection_lm = DictDummyLM(reflection_lm_history)
     optimizer = dspy.GEPA(
@@ -165,7 +165,7 @@ def all_component_selector(state, trajectories, subsample_scores, candidate_idx,
 
     lm_main = DictDummyLM(lm_history)
     reflection_lm = DictDummyLM(reflection_lm_history)
-    dspy.settings.configure(lm=lm_main)
+    dspy.configure(lm=lm_main)
     optimizer = dspy.GEPA(
         metric=metric,
         reflection_lm=reflection_lm,
diff --git a/tests/teleprompt/test_gepa_instruction_proposer.py b/tests/teleprompt/test_gepa_instruction_proposer.py
index 1efeb5c5ac..56af5e2070 100644
--- a/tests/teleprompt/test_gepa_instruction_proposer.py
+++ b/tests/teleprompt/test_gepa_instruction_proposer.py
@@ -107,7 +107,7 @@ def test_reflection_lm_gets_structured_images():
             {"label": "vertebrate"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     gepa = dspy.GEPA(
         metric=lambda gold, pred, trace=None, pred_name=None, pred_trace=None: 0.3,
@@ -172,7 +172,7 @@ def __call__(self, candidate, reflective_dataset, components_to_update):
             {"label": "mode"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     # Test the full flexibility: no reflection_lm provided to GEPA at all!
     # The updated GEPA core library now allows this when using custom proposers
@@ -266,7 +266,7 @@ def __call__(
             {"label": "herbivore"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     reflection_lm = DummyLM(
         [
@@ -323,7 +323,7 @@ def test_default_proposer():
             {"label": "herbivore"},
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     reflection_lm = DummyLM(
         [
diff --git a/tests/teleprompt/test_knn_fewshot.py b/tests/teleprompt/test_knn_fewshot.py
index db72c58e52..a6a451425e 100644
--- a/tests/teleprompt/test_knn_fewshot.py
+++ b/tests/teleprompt/test_knn_fewshot.py
@@ -49,7 +49,7 @@ def _test_knn_few_shot_compile(setup_knn_few_shot):
 
     # Setup DummyLM with a response for a query similar to one of the training examples
     lm = DummyLM(["Madrid", "10"])
-    dspy.settings.configure(lm=lm)  # Responses for the capital of Spain and the result of 5+5)
+    dspy.configure(lm=lm)  # Responses for the capital of Spain and the result of 5+5
 
     knn_few_shot = setup_knn_few_shot
     trainset = knn_few_shot.KNN.trainset
diff --git a/tests/teleprompt/test_random_search.py b/tests/teleprompt/test_random_search.py
index 7da4069aaa..3041be0d33 100644
--- a/tests/teleprompt/test_random_search.py
+++ b/tests/teleprompt/test_random_search.py
@@ -29,7 +29,7 @@ def test_basic_workflow():
             "Finish[blue]",  # Expected output for both training and validation
         ]
     )
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     optimizer = BootstrapFewShotWithRandomSearch(metric=simple_metric, max_bootstrapped_demos=1, max_labeled_demos=1)
     trainset = [
diff --git a/tests/utils/test_saving.py b/tests/utils/test_saving.py
index 57047fd2bd..85754ace7b 100644
--- a/tests/utils/test_saving.py
+++ b/tests/utils/test_saving.py
@@ -60,7 +60,7 @@ class MySignature(dspy.Signature):
 @pytest.mark.extra
 def test_save_compiled_model(tmp_path):
     predict = dspy.Predict("question->answer")
-    dspy.settings.configure(lm=DummyLM([{"answer": "blue"}, {"answer": "white"}] * 10))
+    dspy.configure(lm=DummyLM([{"answer": "blue"}, {"answer": "white"}] * 10))
 
     trainset = [
         {"question": "What is the color of the sky?", "answer": "blue"},
diff --git a/tests/utils/test_settings.py b/tests/utils/test_settings.py
index f2bd93ec26..410309f4a1 100644
--- a/tests/utils/test_settings.py
+++ b/tests/utils/test_settings.py
@@ -160,7 +160,7 @@ def bar1():
         # In this case, foo1 (async) calls bar1 (sync), and bar1 uses the async task from foo1.
         with pytest.raises(RuntimeError) as e:
             dspy.configure(lm=dspy.LM("openai/gpt-4o"))
-        assert "dspy.settings.configure(...) can only be called from the same async" in str(e.value)
+        assert "dspy.configure(...) can only be called from the same async" in str(e.value)
 
     async def foo1():
         bar1()
@@ -170,7 +170,7 @@ async def foo2():
         # `dspy.configure` is disallowed in different async tasks from the initial one.
         with pytest.raises(RuntimeError) as e:
             dspy.configure(lm=dspy.LM("openai/gpt-4o"))
-        assert "dspy.settings.configure(...) can only be called from the same async" in str(e.value)
+        assert "dspy.configure(...) can only be called from the same async" in str(e.value)
         await asyncio.sleep(0.1)
 
     async def foo3():
diff --git a/tests/utils/test_usage_tracker.py b/tests/utils/test_usage_tracker.py
index 71c08ee651..574d1f7400 100644
--- a/tests/utils/test_usage_tracker.py
+++ b/tests/utils/test_usage_tracker.py
@@ -137,7 +137,7 @@ def test_track_usage_with_multiple_models():
 
 def test_track_usage_context_manager(lm_for_test):
     lm = dspy.LM(lm_for_test, cache=False)
-    dspy.settings.configure(lm=lm)
+    dspy.configure(lm=lm)
 
     predict = dspy.ChainOfThought("question -> answer")
     with track_usage() as tracker: