2 changes: 1 addition & 1 deletion mteb/models/model_implementations/google_models.py
@@ -275,5 +275,5 @@ def gemma_embedding_loader(model_name: str, revision: str, **kwargs):
 public_training_data=None,
 training_datasets=GECKO_TRAINING_DATA,
 similarity_fn_name="cosine",
-memory_usage_mb=578,
+memory_usage_mb=1155,
 )
2 changes: 1 addition & 1 deletion mteb/models/model_implementations/nb_sbert.py
@@ -11,7 +11,7 @@
 revision="b95656350a076aeafd2d23763660f80655408cc6",
 release_date="2022-11-23",
 n_parameters=1_780_000_000,
-memory_usage_mb=197,
+memory_usage_mb=678,
 embed_dim=4096,
 license="apache-2.0",
 max_tokens=75,
@@ -146,7 +146,7 @@ def encode(
 release_date="2025-06-27",
 modalities=["image", "text"],
 n_parameters=2_418_000_000,
-memory_usage_mb=9224,
+memory_usage_mb=4610,
 max_tokens=8192,
 embed_dim=2048,
 license="https://huggingface.co/nvidia/llama-nemoretriever-colembed-1b-v1/blob/main/LICENSE",
@@ -172,7 +172,7 @@ def encode(
 release_date="2025-06-27",
 modalities=["image", "text"],
 n_parameters=4_407_000_000,
-memory_usage_mb=16811,
+memory_usage_mb=8403,
 max_tokens=8192,
 embed_dim=3072,
 license="https://huggingface.co/nvidia/llama-nemoretriever-colembed-1b-v1/blob/main/LICENSE",
2 changes: 1 addition & 1 deletion mteb/models/model_implementations/nvidia_models.py
@@ -146,7 +146,7 @@ def instruction_template(
 revision="570834afd5fef5bf3a3c2311a2b6e0a66f6f4f2c",
 release_date="2024-09-13", # initial commit of hf model.
 n_parameters=7_850_000_000,
-memory_usage_mb=29945,
+memory_usage_mb=14975,
 embed_dim=4096,
 license="cc-by-nc-4.0",
 max_tokens=32768,
4 changes: 2 additions & 2 deletions mteb/models/model_implementations/ops_moa_models.py
@@ -27,7 +27,7 @@ def encode(self, sentences: list[str], **kwargs) -> np.ndarray:
 languages=["zho-Hans"],
 loader=OPSWrapper,
 n_parameters=int(343 * 1e6),
-memory_usage_mb=2e3,
+memory_usage_mb=1308,
 max_tokens=512,
 embed_dim=1536,
 license="cc-by-nc-4.0",
@@ -58,7 +58,7 @@ def encode(self, sentences: list[str], **kwargs) -> np.ndarray:
 languages=["zho-Hans"],
 loader=OPSWrapper,
 n_parameters=int(343 * 1e6),
-memory_usage_mb=2e3,
+memory_usage_mb=1242,
 max_tokens=512,
 embed_dim=1536,
 license="cc-by-nc-4.0",
8 changes: 4 additions & 4 deletions mteb/models/model_implementations/promptriever_models.py
@@ -80,7 +80,7 @@ def loader_inner(**kwargs: Any) -> EncoderProtocol:
 revision="01c7f73d771dfac7d292323805ebc428287df4f9-30b14e3813c0fa45facfd01a594580c3fe5ecf23", # base-peft revision
 release_date="2024-09-15",
 n_parameters=7_000_000_000,
-memory_usage_mb=27,
+memory_usage_mb=26703,
 max_tokens=4096,
 embed_dim=4096,
 license="apache-2.0",
@@ -115,7 +115,7 @@ def loader_inner(**kwargs: Any) -> EncoderProtocol:
 },
 release_date="2024-09-15",
 n_parameters=8_000_000_000,
-memory_usage_mb=31,
+memory_usage_mb=30518,
 max_tokens=8192,
 embed_dim=4096,
 license="apache-2.0",
@@ -143,7 +143,7 @@ def loader_inner(**kwargs: Any) -> EncoderProtocol:
 revision="5206a32e0bd3067aef1ce90f5528ade7d866253f-8b677258615625122c2eb7329292b8c402612c21", # base-peft revision
 release_date="2024-09-15",
 n_parameters=8_000_000_000,
-memory_usage_mb=31,
+memory_usage_mb=30518,
 max_tokens=8192,
 embed_dim=4096,
 training_datasets={
@@ -175,7 +175,7 @@ def loader_inner(**kwargs: Any) -> EncoderProtocol:
 revision="7231864981174d9bee8c7687c24c8344414eae6b-876d63e49b6115ecb6839893a56298fadee7e8f5", # base-peft revision
 release_date="2024-09-15",
 n_parameters=7_000_000_000,
-memory_usage_mb=27,
+memory_usage_mb=26703,
 training_datasets={
 # "samaya-ai/msmarco-w-instructions",
 "mMARCO-NL", # translation not trained on
6 changes: 3 additions & 3 deletions mteb/models/model_implementations/qwen3_models.py
@@ -139,7 +139,7 @@ def q3e_instruct_loader(
 revision="b22da495047858cce924d27d76261e96be6febc0", # Commit of @tomaarsen
 release_date="2025-06-05",
 n_parameters=595776512,
-memory_usage_mb=2272,
+memory_usage_mb=1136,
 embed_dim=1024,
 max_tokens=32768,
 license="apache-2.0",
@@ -161,7 +161,7 @@ def q3e_instruct_loader(
 revision="636cd9bf47d976946cdbb2b0c3ca0cb2f8eea5ff", # Commit of @tomaarsen
 release_date="2025-06-05",
 n_parameters=4021774336,
-memory_usage_mb=15341,
+memory_usage_mb=7671,
 embed_dim=2560,
 max_tokens=32768,
 license="apache-2.0",
@@ -183,7 +183,7 @@ def q3e_instruct_loader(
 revision="4e423935c619ae4df87b646a3ce949610c66241c", # Commit of @tomaarsen
 release_date="2025-06-05",
 n_parameters=7567295488,
-memory_usage_mb=28866,
+memory_usage_mb=14433,
 embed_dim=4096,
 max_tokens=32768,
 license="apache-2.0",
2 changes: 1 addition & 1 deletion mteb/models/model_implementations/qzhou_models.py
@@ -63,7 +63,7 @@ def instruction_template(
 revision="f1e6c03ee3882e7b9fa5cec91217715272e433b8",
 release_date="2025-08-24",
 n_parameters=7_070_619_136,
-memory_usage_mb=29070,
+memory_usage_mb=14436,
 embed_dim=3584,
 license="apache-2.0",
 max_tokens=8192,
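Several of the corrected figures line up with the common estimate of weight memory as parameter count × bytes per parameter, expressed in MiB. Below is a minimal sketch of that arithmetic, assuming fp16/bf16 weights for some entries and fp32 for others; the helper and the per-model dtypes are illustrative assumptions, not code from this PR.

```python
# Hypothetical helper (not part of this PR): estimate raw weight memory in MiB
# from a parameter count, given an assumed weight dtype.

BYTES_PER_PARAM = {"fp32": 4, "fp16": 2, "bf16": 2, "int8": 1}


def estimate_memory_usage_mb(n_parameters: int, dtype: str = "fp16") -> int:
    """Raw weight memory in MiB: n_parameters * bytes-per-parameter / 2**20."""
    return round(n_parameters * BYTES_PER_PARAM[dtype] / 2**20)


if __name__ == "__main__":
    # Parameter counts taken from the diff above; dtypes are assumptions.
    print(estimate_memory_usage_mb(595_776_512, "fp16"))    # 1136, matches qwen3_models.py
    print(estimate_memory_usage_mb(7_567_295_488, "fp16"))  # 14433, matches qwen3_models.py
    print(estimate_memory_usage_mb(7_000_000_000, "fp32"))  # 26703, matches promptriever_models.py
```

Not every updated entry reduces exactly to this formula, so treat the sketch as an order-of-magnitude check on the new values rather than the script used to produce them.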