diff --git a/meilisearch/config.py b/meilisearch/config.py
index 981b50c5..3b9fa457 100644
--- a/meilisearch/config.py
+++ b/meilisearch/config.py
@@ -45,6 +45,7 @@ class Paths:
     prefix_search = "prefix-search"
     proximity_precision = "proximity-precision"
     localized_attributes = "localized-attributes"
+    fields = "fields"
     edit = "edit"
     network = "network"
     experimental_features = "experimental-features"
diff --git a/meilisearch/index.py b/meilisearch/index.py
index 808bd08f..6c65874a 100644
--- a/meilisearch/index.py
+++ b/meilisearch/index.py
@@ -23,7 +23,7 @@ from meilisearch._utils import iso_to_date_time
 from meilisearch.config import Config
 from meilisearch.errors import version_error_hint_message
-from meilisearch.models.document import Document, DocumentsResults
+from meilisearch.models.document import Document, DocumentsResults, FieldsResults
 from meilisearch.models.embedders import (
     CompositeEmbedder,
     Embedders,
@@ -2555,6 +2555,66 @@ def reset_localized_attributes(self) -> TaskInfo:
 
         return TaskInfo(**task)
 
+    def get_fields(
+        self,
+        offset: Optional[int] = None,
+        limit: Optional[int] = None,
+        filter: Optional[MutableMapping[str, Any]] = None,  # pylint: disable=redefined-builtin
+    ) -> FieldsResults:
+        """Get all fields of the index.
+
+        Returns detailed metadata about all fields in the index, including
+        display, search, filtering, and localization settings for each field.
+
+        https://www.meilisearch.com/docs/reference/api/indexes#get-fields
+
+        Parameters
+        ----------
+        offset (optional):
+            Number of fields to skip. Defaults to 0.
+        limit (optional):
+            Maximum number of fields to return. Defaults to 20.
+        filter (optional):
+            Dictionary containing filter configuration. All filter properties are optional
+            and can be combined using AND logic. Available filters:
+            - attributePatterns: List of attribute patterns (supports wildcards: * for any characters)
+              Examples: ["cuisine.*", "*_id"] matches cuisine.type and all fields ending with _id
+            - displayed: Boolean - true for only displayed fields, false for only hidden fields
+            - searchable: Boolean - true for only searchable fields, false for only non-searchable fields
+            - sortable: Boolean - true for only sortable fields, false for only non-sortable fields
+            - distinct: Boolean - true for only the distinct field, false for only non-distinct fields
+            - rankingRule: Boolean - true for only fields used in ranking, false for fields not used in ranking
+            - filterable: Boolean - true for only filterable fields, false for only non-filterable fields
+
+        Returns
+        -------
+        FieldsResults:
+            Object containing:
+            - results: List of field metadata dictionaries
+            - offset: Number of fields skipped
+            - limit: Maximum fields returned
+            - total: Total number of fields in the index
+
+        Raises
+        ------
+        MeilisearchApiError
+            An error containing details about why Meilisearch can't process your request.
+            Meilisearch error codes are described here: https://www.meilisearch.com/docs/reference/errors/error_codes#meilisearch-errors
+        """
+        # Only send keys the caller actually provided so Meilisearch applies
+        # its own server-side defaults (offset=0, limit=20, no filter).
+        body: Dict[str, Any] = {}
+        if offset is not None:
+            body["offset"] = offset
+        if limit is not None:
+            body["limit"] = limit
+        if filter is not None:
+            body["filter"] = filter
+
+        response = self.http.post(
+            f"{self.config.paths.index}/{self.uid}/{self.config.paths.fields}",
+            body=body,
+        )
+
+        return FieldsResults(response)
+
     @staticmethod
     def _batch(
         documents: Sequence[Mapping[str, Any]], batch_size: int
diff --git a/meilisearch/models/document.py b/meilisearch/models/document.py
index a484e0fc..8fe72f2b 100644
--- a/meilisearch/models/document.py
+++ b/meilisearch/models/document.py
@@ -20,3 +20,13 @@ def __init__(self, resp: Dict[str, Any]) -> None:
         self.offset: int = resp["offset"]
         self.limit: int = resp["limit"]
         self.total: int = resp["total"]
+
+
+class FieldsResults:
+    """Response object for get_fields containing pagination metadata and field list."""
+
+    def __init__(self, resp: Dict[str, Any]) -> None:
+        self.results: List[Dict[str, Any]] = resp["results"]
+        self.offset: int = resp["offset"]
+        self.limit: int = resp["limit"]
+        self.total: int = resp["total"]
diff --git a/tests/index/test_index.py b/tests/index/test_index.py
index 38b11943..0894d3ae 100644
--- a/tests/index/test_index.py
+++ b/tests/index/test_index.py
@@ -283,3 +283,99 @@ def test_index_update_without_params(client):
         index.update()
 
     assert "primary_key" in str(exc.value) or "new_uid" in str(exc.value)
+
+
+@pytest.mark.usefixtures("indexes_sample")
+def test_get_fields(client, small_movies):
+    """Tests getting all fields of an index via the new /fields endpoint."""
+    index = client.index(uid=common.INDEX_UID)
+    task = index.add_documents(small_movies)
+    client.wait_for_task(task.task_uid)
+
+    fields = index.get_fields()
+
+    assert hasattr(fields, "results")
+    assert hasattr(fields, "offset")
+    assert hasattr(fields, "limit")
+    assert hasattr(fields, "total")
+    assert isinstance(fields.results, list)
+    assert len(fields.results) > 0
+    assert "name" in fields.results[0]
+    assert "searchable" in fields.results[0]
+    assert "filterable" in fields.results[0]
+    assert "sortable" in fields.results[0]
+
+
+@pytest.mark.usefixtures("indexes_sample")
+def test_get_fields_with_configurations(client, small_movies):
+    """Tests get_fields() reflects index settings configurations."""
+    index = client.index(uid=common.INDEX_UID)
+    task = index.add_documents(small_movies)
+    client.wait_for_task(task.task_uid)
+
+    task = index.update_searchable_attributes(["title"])
+    client.wait_for_task(task.task_uid)
+
+    fields = index.get_fields()
+    title_field = next((f for f in fields.results if f["name"] == "title"), None)
+
+    assert title_field is not None
+    assert title_field["searchable"]["enabled"] is True
+
+
+@pytest.mark.usefixtures("indexes_sample")
+def test_get_fields_with_filter(client, small_movies):
+    """Tests get_fields() with filter parameters."""
+    index = client.index(uid=common.INDEX_UID)
+    task = index.add_documents(small_movies)
+    client.wait_for_task(task.task_uid)
+
+    task = index.update_searchable_attributes(["title"])
+    client.wait_for_task(task.task_uid)
+
+    # Filter only searchable fields
+    searchable_fields = index.get_fields(filter={"searchable": True})
+
+    assert isinstance(searchable_fields.results, list)
+    assert len(searchable_fields.results) > 0
+    assert all(field["searchable"]["enabled"] is True for field in searchable_fields.results)
+
+
+@pytest.mark.usefixtures("indexes_sample")
+def test_get_fields_with_pagination(client, small_movies):
+    """Tests get_fields() with pagination parameters."""
+    index = client.index(uid=common.INDEX_UID)
+    task = index.add_documents(small_movies)
+    client.wait_for_task(task.task_uid)
+
+    # Get all fields first to know total count
+    all_fields = index.get_fields()
+    total_fields = all_fields.total
+
+    # Test pagination with offset and limit
+    page1 = index.get_fields(offset=0, limit=2)
+    assert isinstance(page1.results, list)
+    assert len(page1.results) <= 2
+    assert page1.offset == 0
+    assert page1.limit == 2
+
+    # If we have more than 2 fields, test second page
+    if total_fields > 2:
+        page2 = index.get_fields(offset=2, limit=2)
+        assert isinstance(page2.results, list)
+        assert len(page2.results) <= 2
+        assert page2.offset == 2
+
+        # Verify pages don't overlap
+        page1_names = {f["name"] for f in page1.results}
+        page2_names = {f["name"] for f in page2.results}
+        assert page1_names.isdisjoint(page2_names)
+
+    # Test with just limit (no offset)
+    limited = index.get_fields(limit=3)
+    assert isinstance(limited.results, list)
+    assert len(limited.results) <= 3
+
+    # Test with just offset (no limit, uses default)
+    offset_only = index.get_fields(offset=1)
+    assert isinstance(offset_only.results, list)