Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions meilisearch/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ class Paths:
prefix_search = "prefix-search"
proximity_precision = "proximity-precision"
localized_attributes = "localized-attributes"
fields = "fields"
edit = "edit"
network = "network"
experimental_features = "experimental-features"
Expand Down
62 changes: 61 additions & 1 deletion meilisearch/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
from meilisearch._utils import iso_to_date_time
from meilisearch.config import Config
from meilisearch.errors import version_error_hint_message
from meilisearch.models.document import Document, DocumentsResults
from meilisearch.models.document import Document, DocumentsResults, FieldsResults
from meilisearch.models.embedders import (
CompositeEmbedder,
Embedders,
Expand Down Expand Up @@ -2555,6 +2555,66 @@ def reset_localized_attributes(self) -> TaskInfo:

return TaskInfo(**task)

def get_fields(
    self,
    offset: Optional[int] = None,
    limit: Optional[int] = None,
    filter: Optional[MutableMapping[str, Any]] = None,  # pylint: disable=redefined-builtin
) -> FieldsResults:
    """Retrieve metadata for the fields of this index.

    Each returned entry describes one field, including its display,
    search, filtering, sorting, and localization configuration.

    https://www.meilisearch.com/docs/reference/api/indexes#get-fields

    Parameters
    ----------
    offset (optional):
        Number of fields to skip (server default: 0).
    limit (optional):
        Maximum number of fields to return (server default: 20).
    filter (optional):
        Mapping of filter criteria combined with AND logic. Supported keys:
        - attributePatterns: list of attribute patterns, with ``*`` as a
          wildcard (e.g. ``["cuisine.*", "*_id"]``)
        - displayed / searchable / sortable / distinct / rankingRule /
          filterable: booleans selecting fields for which the given
          capability is enabled (``True``) or disabled (``False``)

    Returns
    -------
    FieldsResults:
        Paginated result with ``results`` (list of field metadata dicts),
        ``offset``, ``limit``, and ``total``.

    Raises
    ------
    MeilisearchApiError
        An error containing details about why Meilisearch can't process your request. Meilisearch error codes are described here: https://www.meilisearch.com/docs/reference/errors/error_codes#meilisearch-errors
    """
    # Only send parameters the caller actually supplied, so the server
    # applies its own defaults for the rest.
    payload: Dict[str, Any] = {
        name: value
        for name, value in (("offset", offset), ("limit", limit), ("filter", filter))
        if value is not None
    }

    route = f"{self.config.paths.index}/{self.uid}/{self.config.paths.fields}"
    response = self.http.post(route, body=payload)

    return FieldsResults(response)

@staticmethod
def _batch(
documents: Sequence[Mapping[str, Any]], batch_size: int
Expand Down
10 changes: 10 additions & 0 deletions meilisearch/models/document.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,3 +20,13 @@ def __init__(self, resp: Dict[str, Any]) -> None:
self.offset: int = resp["offset"]
self.limit: int = resp["limit"]
self.total: int = resp["total"]


class FieldsResults:
    """Paginated listing of index fields as returned by the ``/fields`` route.

    Attributes
    ----------
    results:
        Field metadata dictionaries, one per field.
    offset:
        Number of fields skipped before this page.
    limit:
        Maximum number of fields contained in this page.
    total:
        Total number of fields in the index.
    """

    def __init__(self, resp: Dict[str, Any]) -> None:
        # Direct indexing (KeyError on malformed payloads) matches the
        # behavior of DocumentsResults in this module.
        self.total: int = resp["total"]
        self.limit: int = resp["limit"]
        self.offset: int = resp["offset"]
        self.results: List[Dict[str, Any]] = resp["results"]
96 changes: 96 additions & 0 deletions tests/index/test_index.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,3 +283,99 @@ def test_index_update_without_params(client):
index.update()

assert "primary_key" in str(exc.value) or "new_uid" in str(exc.value)


@pytest.mark.usefixtures("indexes_sample")
def test_get_fields(client, small_movies):
    """The /fields endpoint returns paginated per-field metadata."""
    index = client.index(uid=common.INDEX_UID)
    task = index.add_documents(small_movies)
    client.wait_for_task(task.task_uid)

    fields = index.get_fields()

    # Response object exposes the pagination attributes.
    for attr in ("results", "offset", "limit", "total"):
        assert hasattr(fields, attr)

    assert isinstance(fields.results, list)
    assert len(fields.results) > 0

    # Every entry carries the per-field capability metadata.
    first = fields.results[0]
    for key in ("name", "searchable", "filterable", "sortable"):
        assert key in first


@pytest.mark.usefixtures("indexes_sample")
def test_get_fields_with_configurations(client, small_movies):
    """Settings changes are reflected in the metadata from get_fields()."""
    index = client.index(uid=common.INDEX_UID)
    task = index.add_documents(small_movies)
    client.wait_for_task(task.task_uid)

    # Restrict searchability to "title" and confirm the field reports it.
    task = index.update_searchable_attributes(["title"])
    client.wait_for_task(task.task_uid)

    entries = index.get_fields().results
    title_field = next((field for field in entries if field["name"] == "title"), None)

    assert title_field is not None
    assert title_field["searchable"]["enabled"] is True


@pytest.mark.usefixtures("indexes_sample")
def test_get_fields_with_filter(client, small_movies):
    """A filter body restricts get_fields() to matching fields only."""
    index = client.index(uid=common.INDEX_UID)
    task = index.add_documents(small_movies)
    client.wait_for_task(task.task_uid)

    task = index.update_searchable_attributes(["title"])
    client.wait_for_task(task.task_uid)

    # Ask only for fields that are currently searchable.
    searchable_fields = index.get_fields(filter={"searchable": True})

    assert isinstance(searchable_fields.results, list)
    assert len(searchable_fields.results) > 0
    for field in searchable_fields.results:
        assert field["searchable"]["enabled"] is True


@pytest.mark.usefixtures("indexes_sample")
def test_get_fields_with_pagination(client, small_movies):
    """Tests get_fields() with pagination parameters.

    Covers offset+limit together, limit alone, and offset alone, and checks
    that two consecutive pages do not overlap when enough fields exist.
    """
    index = client.index(uid=common.INDEX_UID)
    task = index.add_documents(small_movies)
    client.wait_for_task(task.task_uid)

    # Get all fields first to know total count
    all_fields = index.get_fields()
    total_fields = all_fields.total

    # Test pagination with offset and limit
    page1 = index.get_fields(offset=0, limit=2)
    assert isinstance(page1.results, list)
    assert len(page1.results) <= 2
    assert page1.offset == 0
    assert page1.limit == 2

    # If we have more than 2 fields, test second page
    if total_fields > 2:
        page2 = index.get_fields(offset=2, limit=2)
        assert isinstance(page2.results, list)
        assert len(page2.results) <= 2
        assert page2.offset == 2

        # Verify pages don't overlap
        page1_names = {f["name"] for f in page1.results}
        page2_names = {f["name"] for f in page2.results}
        assert page1_names.isdisjoint(page2_names)

    # Test with just limit (no offset)
    limited = index.get_fields(limit=3)
    assert isinstance(limited.results, list)
    assert len(limited.results) <= 3

    # Test with just offset (no limit, uses default)
    offset_only = index.get_fields(offset=1)
    assert isinstance(offset_only.results, list)