Skip to content

Commit 9e667cc

Browse files
committed
refactor: update core components to generalize prompt optimization
1 parent 00c5d36 commit 9e667cc

File tree

8 files changed

+31
-899
lines changed

8 files changed

+31
-899
lines changed

README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,11 @@
2424

2525

2626

27-
prompt-ops is a Python package that **automatically optimizes prompts** for Llama models. It transforms prompts that work well with other LLMs into prompts that are optimized for Llama models, improving performance and reliability.
27+
prompt-ops is a Python package that **automatically optimizes prompts** for LLMs. It transforms prompts that work well with one LLM into prompts that are optimized for your target model, improving performance and reliability.
2828

2929
**Key Benefits:**
3030
- **No More Trial and Error**: Stop manually tweaking prompts to get better results
31-
- **Fast Optimization**: Get Llama-optimized prompts in minutes with template-based optimization
31+
- **Fast Optimization**: Get model-optimized prompts in minutes with template-based optimization
3232
- **Data-Driven Improvements**: Use your own examples to create prompts that work for your specific use case
3333
- **Measurable Results**: Evaluate prompt performance with customizable metrics
3434

@@ -66,7 +66,7 @@ To get started with prompt-ops, you'll need:
6666
2. [**Prepare your dataset**](#preparing-your-data): Create a JSON file with query-response pairs for evaluation and optimization
6767
3. **Configure optimization**: Set up a simple YAML file with your dataset and preferences (see [example configuration](configs/facility-simple.yaml))
6868
4. [**Run optimization**](#step-4-run-optimization): Execute a single command to transform your prompt
69-
5. [**Get results**](#prompt-transformation-example): Receive a Llama-optimized prompt with performance metrics
69+
5. [**Get results**](#prompt-transformation-example): Receive a model-optimized prompt with performance metrics
7070

7171

7272
## Real-world Results

src/prompt_ops/core/migrator.py

Lines changed: 2 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@
2525
from .exceptions import EvaluationError, OptimizationError
2626
from .prompt_strategies import BaseStrategy
2727
from .utils import json_to_yaml_file
28-
from .utils.llama_utils import is_llama_model
2928
from .utils.logging import get_logger
3029

3130

@@ -73,31 +72,8 @@ def __init__(
7372
self.valset = valset
7473
self.testset = testset
7574

76-
# Determine if we're using a Llama model
77-
if model_family is None and task_model is not None:
78-
# Try to extract model name from task_model
79-
if hasattr(task_model, "model_name"):
80-
model_name = task_model.model_name
81-
else:
82-
model_name = str(task_model)
83-
84-
if is_llama_model(model_name):
85-
self.model_family = "llama"
86-
else:
87-
logging.warning(
88-
f"Model '{model_name}' does not appear to be a Llama model. "
89-
f"This library is optimized for Llama models and may not work as expected."
90-
)
91-
self.model_family = (
92-
"llama" # Default to Llama anyway since that's our focus
93-
)
94-
else:
95-
self.model_family = model_family or "llama"
96-
if self.model_family != "llama":
97-
logging.warning(
98-
f"Model family '{self.model_family}' specified, but this library "
99-
f"is optimized for Llama models and may not work as expected."
100-
)
75+
# Set model family if provided (model-agnostic, no defaults)
76+
self.model_family = model_family
10177

10278
self._optimized_program = None
10379
self.logger = get_logger()
@@ -112,7 +88,6 @@ def optimize(
11288
file_path: str = None,
11389
save_yaml: bool = True,
11490
user_prompt: str = None,
115-
use_llama_tips: bool = True,
11691
) -> Any:
11792
"""
11893
Optimize a prompt using the configured strategy.
@@ -136,14 +111,6 @@ def optimize(
136111
if "text" not in prompt_data:
137112
raise ValueError("prompt_data must contain a 'text' key.")
138113

139-
if use_llama_tips:
140-
from .utils.llama_utils import get_llama_tips
141-
142-
model_tips = get_llama_tips()
143-
144-
if "model_tips" not in prompt_data:
145-
prompt_data["model_tips"] = model_tips
146-
147114
trainset = trainset if trainset is not None else self.trainset
148115
valset = valset if valset is not None else self.valset
149116
testset = testset if testset is not None else self.testset

src/prompt_ops/core/model_strategies.py

Lines changed: 0 additions & 204 deletions
This file was deleted.

0 commit comments

Comments
 (0)