diff --git a/CHANGELOG.md b/CHANGELOG.md index ebcc884af7c..993f8a48ca4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ * [FEATURE] Query-frontends: Automatically adjust features used in query plans generated for remote execution based on what the available queriers support. #13017 #13164 * [FEATURE] Memberlist: Add experimental support for zone-aware routing, in order to reduce memberlist cross-AZ data transfer. #13129 * [FEATURE] Query-frontend and querier: Add experimental support for performing query planning in query-frontends and distributing portions of the plan to queriers for execution. #13058 +* [FEATURE] MQE: Add support for experimental extended range selector modifiers `smoothed` and `anchored`. You can enable these modifiers with `-query-frontend.enabled-promql-extended-range-selectors=smoothed,anchored`. #13398 * [FEATURE] Querier: Add `querier.mimir-query-engine.enable-reduce-matchers` flag that enables a new MQE AST optimization pass that eliminates duplicate or redundant matchers that are part of selector expressions. #13178 * [ENHANCEMENT] Compactor, Store-gateway: Change default value of `-compactor.upload-sparse-index-headers` to `true`. This improves lazy loading performance in the store-gateway. #13089 * [ENHANCEMENT] Store-gateway: Verify CRC32 checksums for 1 out of every 128 chunks read from object storage and the chunks cache to detect corruption. #13151 @@ -55,6 +56,7 @@ * `-common.storage.gcs.max-retries` * `-ruler-storage.gcs.max-retries` * [ENHANCEMENT] Usage-tracker: Improve first snapshot loading & rehash speed. #13284 +* [ENHANCEMENT] Query-frontend: Limits middleware will record different error messages when experimental functions, aggregations, or extended range selector modifiers are used but not enabled for a tenant. #13398 * [ENHANCEMENT] Usage-tracker, distributor: Make usage-tracker calls asynchronous for users who are far enough from the series limits. 
#13427 * [ENHANCEMENT] Ruler: Implemented `OperatorControllableErrorClassifier` for rule evaluation, allowing differentiation between operator-controllable errors (e.g., storage failures, 5xx errors, rate limiting) and user-controllable errors (e.g., bad queries, validation errors, 4xx errors). This change affects the rule evaluation failure metric `prometheus_rule_evaluation_failures_total`, which now includes a `reason` label with values `operator` or `user` to distinguish between them. #13313, #13470 * [ENHANCEMENT] Store-gateway: Added `cortex_bucket_store_block_discovery_latency_seconds` metric to track time from block creation to discovery by store-gateway. #13489 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 96e2abe580d..a8bd0a23fdf 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -5753,6 +5753,16 @@ "fieldFlag": "query-frontend.enabled-promql-experimental-functions", "fieldType": "string" }, + { + "kind": "field", + "name": "enabled_promql_extended_range_selectors", + "required": false, + "desc": "Enable certain experimental PromQL extended range selector modifiers, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental modifiers are disabled. Set to 'all' to enable all experimental modifiers.", + "fieldValue": null, + "fieldDefaultValue": "", + "fieldFlag": "query-frontend.enabled-promql-extended-range-selectors", + "fieldType": "string" + }, { "kind": "field", "name": "prom2_range_compat", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 979ba999bed..973467a8316 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -2351,6 +2351,8 @@ Usage of ./cmd/mimir/mimir: [experimental] If set to true and the Mimir query engine is in use, use remote execution to evaluate queries in queriers. 
-query-frontend.enabled-promql-experimental-functions comma-separated-list-of-strings Enable certain experimental PromQL functions, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental functions are disabled. Set to 'all' to enable all experimental functions. + -query-frontend.enabled-promql-extended-range-selectors comma-separated-list-of-strings + Enable certain experimental PromQL extended range selector modifiers, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental modifiers are disabled. Set to 'all' to enable all experimental modifiers. -query-frontend.extra-propagated-headers comma-separated-list-of-strings Comma-separated list of request header names to allow to pass through to the rest of the query path. This is in addition to a list of required headers that the read path needs. -query-frontend.grpc-client-config.backoff-max-period duration diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl index 1febb81d975..1e0d5d7fa81 100644 --- a/cmd/mimir/help.txt.tmpl +++ b/cmd/mimir/help.txt.tmpl @@ -603,6 +603,8 @@ Usage of ./cmd/mimir/mimir: Cache statistics of processed samples on results cache. -query-frontend.enabled-promql-experimental-functions comma-separated-list-of-strings Enable certain experimental PromQL functions, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental functions are disabled. Set to 'all' to enable all experimental functions. + -query-frontend.enabled-promql-extended-range-selectors comma-separated-list-of-strings + Enable certain experimental PromQL extended range selector modifiers, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental modifiers are disabled. Set to 'all' to enable all experimental modifiers. 
-query-frontend.log-queries-longer-than duration Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries. -query-frontend.max-queriers-per-tenant int diff --git a/docs/sources/mimir/configure/about-versioning.md b/docs/sources/mimir/configure/about-versioning.md index 1b21c5fc9dc..d10101625c8 100644 --- a/docs/sources/mimir/configure/about-versioning.md +++ b/docs/sources/mimir/configure/about-versioning.md @@ -211,6 +211,7 @@ The following features are currently experimental: - Remote execution of queries in queriers: `-query-frontend.enable-remote-execution=true` - Performing query sharding within MQE: `-query-frontend.use-mimir-query-engine-for-sharding=true` - Rewriting of queries to optimize processing: `-query-frontend.rewrite-histogram-queries` and `-query-frontend.rewrite-propagate-matchers` + - Enable experimental Prometheus extended range selector modifiers `smoothed` and `anchored` (`-query-frontend.enabled-promql-extended-range-selectors=smoothed,anchored`) - Query-scheduler - `-query-scheduler.querier-forget-delay` - Store-gateway diff --git a/docs/sources/mimir/configure/configuration-parameters/index.md b/docs/sources/mimir/configure/configuration-parameters/index.md index 9a88e855ede..18f431ae887 100644 --- a/docs/sources/mimir/configure/configuration-parameters/index.md +++ b/docs/sources/mimir/configure/configuration-parameters/index.md @@ -3995,6 +3995,13 @@ blocked_requests: # CLI flag: -query-frontend.enabled-promql-experimental-functions [enabled_promql_experimental_functions: | default = ""] +# Enable certain experimental PromQL extended range selector modifiers, which +# are subject to being changed or removed at any time, on a per-tenant basis. +# Defaults to empty which means all experimental modifiers are disabled. Set to +# 'all' to enable all experimental modifiers. 
+# CLI flag: -query-frontend.enabled-promql-extended-range-selectors +[enabled_promql_extended_range_selectors: | default = ""] + # (experimental) Rewrite queries using the same range selector and resolution # [X:X] which don't work in Prometheus 3.0 to a nearly identical form that works # with Prometheus 3.0 semantics diff --git a/operations/mimir/mimir-flags-defaults.json b/operations/mimir/mimir-flags-defaults.json index b626240802b..0d2ab66a59e 100644 --- a/operations/mimir/mimir-flags-defaults.json +++ b/operations/mimir/mimir-flags-defaults.json @@ -393,6 +393,7 @@ "query-frontend.results-cache-ttl-for-errors": 300000000000, "query-frontend.max-query-expression-size-bytes": 0, "query-frontend.enabled-promql-experimental-functions": "", + "query-frontend.enabled-promql-extended-range-selectors": "", "query-frontend.labels-query-optimizer-enabled": true, "querier.label-names-and-values-results-max-size-bytes": 419430400, "querier.label-values-max-cardinality-label-names-per-request": 100, diff --git a/pkg/frontend/querymiddleware/astmapper/astmapper.go b/pkg/frontend/querymiddleware/astmapper/astmapper.go index ddc19fe200a..d5db620835e 100644 --- a/pkg/frontend/querymiddleware/astmapper/astmapper.go +++ b/pkg/frontend/querymiddleware/astmapper/astmapper.go @@ -144,6 +144,8 @@ func CloneExpr(expr parser.Expr) (parser.Expr, error) { LabelMatchers: matchers, BypassEmptyMatcherCheck: e.BypassEmptyMatcherCheck, PosRange: e.PosRange, + Anchored: e.Anchored, + Smoothed: e.Smoothed, }, nil case *parser.MatrixSelector: diff --git a/pkg/frontend/querymiddleware/astmapper/astmapper_test.go b/pkg/frontend/querymiddleware/astmapper/astmapper_test.go index 5294f000a36..57490096818 100644 --- a/pkg/frontend/querymiddleware/astmapper/astmapper_test.go +++ b/pkg/frontend/querymiddleware/astmapper/astmapper_test.go @@ -139,6 +139,12 @@ func TestCloneExpr(t *testing.T) { `foo and bar`, `foo == bar`, `foo == bool bar`, + + // Range modifiers + `metric[1m] anchored`, + `metric[1m] 
smoothed`, + `rate(metric[1m] anchored)`, + `increase(metric[1m] smoothed)`, } for i, tc := range testCases { @@ -245,11 +251,14 @@ func loadTestExpressionsFromDirectory(t *testing.T, dir string, accumulatedExpre func enableExperimentalParserFeaturesDuringTest(t *testing.T) { oldDurationExpressions := parser.ExperimentalDurationExpr oldExperimentalFunctions := parser.EnableExperimentalFunctions + oldEnableExtendedRangeSelectors := parser.EnableExtendedRangeSelectors parser.ExperimentalDurationExpr = true parser.EnableExperimentalFunctions = true + parser.EnableExtendedRangeSelectors = true t.Cleanup(func() { parser.ExperimentalDurationExpr = oldDurationExpressions parser.EnableExperimentalFunctions = oldExperimentalFunctions + parser.EnableExtendedRangeSelectors = oldEnableExtendedRangeSelectors }) } diff --git a/pkg/frontend/querymiddleware/experimental_features.go b/pkg/frontend/querymiddleware/experimental_features.go new file mode 100644 index 00000000000..a01bbfe19f9 --- /dev/null +++ b/pkg/frontend/querymiddleware/experimental_features.go @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querymiddleware + +import ( + "context" + "fmt" + "slices" + + "github.com/go-kit/log" + "github.com/grafana/dskit/tenant" + "github.com/prometheus/prometheus/promql/parser" + + apierror "github.com/grafana/mimir/pkg/api/error" +) + +const ( + allExperimentalFeatures = "all" +) + +type experimentalFeatureType struct { + label string +} + +var functionType = experimentalFeatureType{label: "function"} +var aggregationType = experimentalFeatureType{label: "aggregation"} +var extendedRangeSelectorModifierType = experimentalFeatureType{label: "extended range selector modifier"} + +// experimentalFeaturesMiddleware manages the per-tenant access to experimental functions, aggregations and extended range selector modifiers. 
+type experimentalFeaturesMiddleware struct { + next MetricsQueryHandler + limits Limits + logger log.Logger +} + +// newExperimentalFeaturesMiddleware creates a middleware that blocks queries that contain PromQL experimental functions, aggregates or range selector modifiers +// that are not enabled for the active tenant(s), allowing us to enable specific features only for selected tenants. +func newExperimentalFeaturesMiddleware(limits Limits, logger log.Logger) MetricsQueryMiddleware { + return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler { + return &experimentalFeaturesMiddleware{ + next: next, + limits: limits, + logger: logger, + } + }) +} + +func (m *experimentalFeaturesMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, apierror.New(apierror.TypeBadData, err.Error()) + } + + enabledExperimentalFunctions := make(map[string][]string, len(tenantIDs)) + allExperimentalFunctionsEnabled := true + for _, tenantID := range tenantIDs { + // note that this includes both functions and aggregations (ie limitk, limit_ratio) + enabled := m.limits.EnabledPromQLExperimentalFunctions(tenantID) + enabledExperimentalFunctions[tenantID] = enabled + if len(enabled) == 0 || enabled[0] != allExperimentalFeatures { + allExperimentalFunctionsEnabled = false + } + } + + enabledExtendedRangeSelectors := make(map[string][]string, len(tenantIDs)) + allExtendedRangeSelectorsEnabled := true + for _, tenantID := range tenantIDs { + enabled := m.limits.EnabledPromQLExtendedRangeSelectors(tenantID) + enabledExtendedRangeSelectors[tenantID] = enabled + if len(enabled) == 0 || enabled[0] != allExperimentalFeatures { + allExtendedRangeSelectorsEnabled = false + } + } + + if allExperimentalFunctionsEnabled && allExtendedRangeSelectorsEnabled { + // If all experimental functions are enabled for all tenants here, we don't need to check the query + // for those 
functions and can skip this middleware. + return m.next.Do(ctx, req) + } + + expr := req.GetParsedQuery() + features := containedExperimentalFeatures(expr) + if len(features) == 0 { + // This query does not contain any experimental functions, so we can continue to the next middleware. + return m.next.Do(ctx, req) + } + + // Make sure that every used experimental feature is enabled for all the tenants here. + var tenantMap map[string][]string + for feature, featureType := range features { + switch featureType { + case functionType, aggregationType: + tenantMap = enabledExperimentalFunctions + case extendedRangeSelectorModifierType: + tenantMap = enabledExtendedRangeSelectors + } + + for _, enabled := range tenantMap { + if len(enabled) > 0 && enabled[0] == allExperimentalFeatures { + // If the first item matches the const value of allExperimentalFeatures, then all experimental + // features are enabled for this tenant. + continue + } + if !slices.Contains(enabled, feature) { + return nil, createExperimentalFeatureError(featureType, feature) + } + } + } + + // Every used experimental feature is enabled for the tenant(s). + return m.next.Do(ctx, req) +} + +func createExperimentalFeatureError(featureType experimentalFeatureType, feature string) error { + err := fmt.Errorf("experimental %s %q is not enabled for tenant", featureType.label, feature) + return apierror.New(apierror.TypeBadData, DecorateWithParamName(err, "query").Error()) +} + +// containedExperimentalFeatures returns any PromQL experimental functions, aggregations or range selector modifiers used in the query. 
+func containedExperimentalFeatures(expr parser.Expr) map[string]experimentalFeatureType { + expFuncNames := map[string]experimentalFeatureType{} + _ = inspect(expr, func(node parser.Node) error { + switch n := node.(type) { + case *parser.Call: + if parser.Functions[n.Func.Name].Experimental { + expFuncNames[n.Func.Name] = functionType + } + case *parser.AggregateExpr: + if n.Op.IsExperimentalAggregator() { + expFuncNames[n.Op.String()] = aggregationType + } + case *parser.MatrixSelector: + vs, ok := n.VectorSelector.(*parser.VectorSelector) + if ok && vs.Anchored { + expFuncNames["anchored"] = extendedRangeSelectorModifierType + } else if ok && vs.Smoothed { + expFuncNames["smoothed"] = extendedRangeSelectorModifierType + } + case *parser.VectorSelector: + if n.Smoothed { + expFuncNames["smoothed"] = extendedRangeSelectorModifierType + } + } + return nil + }) + return expFuncNames +} diff --git a/pkg/frontend/querymiddleware/experimental_features_test.go b/pkg/frontend/querymiddleware/experimental_features_test.go new file mode 100644 index 00000000000..b243af83948 --- /dev/null +++ b/pkg/frontend/querymiddleware/experimental_features_test.go @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querymiddleware + +import ( + "testing" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/stretchr/testify/require" +) + +func TestContainedExperimentalFunctions(t *testing.T) { + t.Cleanup(func() { + parser.EnableExperimentalFunctions = false + parser.EnableExtendedRangeSelectors = false + }) + parser.EnableExperimentalFunctions = true + parser.EnableExtendedRangeSelectors = true + + testCases := map[string]struct { + query string + expect []string + err string + }{ + "sum by": { + query: `sum(up) by (namespace)`, + err: `function "sum by" is not enabled for tenant`, + }, + "mad_over_time": { + query: `mad_over_time(up[5m])`, + expect: []string{"mad_over_time"}, + err: `function "mad_over_time" is not enabled for tenant`, + }, + 
"mad_over_time with sum and by": { + query: `sum(mad_over_time(up[5m])) by (namespace)`, + expect: []string{"mad_over_time"}, + err: `function "mad_over_time" is not enabled for tenant`, + }, + "sort_by_label": { + query: `sort_by_label({__name__=~".+"}, "__name__")`, + expect: []string{"sort_by_label"}, + err: `function "sort_by_label" is not enabled for tenant`, + }, + "sort_by_label_desc": { + query: `sort_by_label_desc({__name__=~".+"}, "__name__")`, + expect: []string{"sort_by_label_desc"}, + err: `function "sort_by_label_desc" is not enabled for tenant`, + }, + "limitk": { + query: `limitk by (group) (0, up)`, + expect: []string{"limitk"}, + err: `aggregation "limitk" is not enabled for tenant`, + }, + "limit_ratio": { + query: `limit_ratio(0.5, up)`, + expect: []string{"limit_ratio"}, + err: `aggregation "limit_ratio" is not enabled for tenant`, + }, + "limit_ratio with mad_over_time": { + query: `limit_ratio(0.5, mad_over_time(up[5m]))`, + expect: []string{"limit_ratio", "mad_over_time"}, + }, + "metric smoothed": { + query: `metric smoothed`, + expect: []string{"smoothed"}, + err: `extended range selector modifier "smoothed" is not enabled for tenant`, + }, + "metric[1m] smoothed": { + query: `metric[1m] smoothed`, + expect: []string{"smoothed"}, + err: `extended range selector modifier "smoothed" is not enabled for tenant`, + }, + "metric[1m] anchored": { + query: `metric[1m] anchored`, + expect: []string{"anchored"}, + err: `extended range selector modifier "anchored" is not enabled for tenant`, + }, + "rate(metric[1m] smoothed)": { + query: `rate(metric[1m] smoothed)`, + expect: []string{"smoothed"}, + err: `extended range selector modifier "smoothed" is not enabled for tenant`, + }, + "increase(metric[1m] anchored)": { + query: `increase(metric[1m] anchored)`, + expect: []string{"anchored"}, + err: `extended range selector modifier "anchored" is not enabled for tenant`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + 
expr, err := parser.ParseExpr(tc.query) + require.NoError(t, err) + var enabled []string + for op, opType := range containedExperimentalFeatures(expr) { + enabled = append(enabled, op) + if len(tc.err) > 0 { + // test that if an error was raised for this function/aggregate/modifier that the expected error string is formed + // an empty tc.err allows for the case to be skipped - such as where we have multiple errors which are validated elsewhere + err := createExperimentalFeatureError(opType, op) + require.ErrorContains(t, err, tc.err) + } + } + require.ElementsMatch(t, tc.expect, enabled) + }) + } +} diff --git a/pkg/frontend/querymiddleware/experimental_functions.go b/pkg/frontend/querymiddleware/experimental_functions.go deleted file mode 100644 index 81ee5d15334..00000000000 --- a/pkg/frontend/querymiddleware/experimental_functions.go +++ /dev/null @@ -1,108 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package querymiddleware - -import ( - "context" - "fmt" - "slices" - - "github.com/go-kit/log" - "github.com/grafana/dskit/tenant" - "github.com/prometheus/prometheus/promql/parser" - - apierror "github.com/grafana/mimir/pkg/api/error" - "github.com/grafana/mimir/pkg/frontend/querymiddleware/astmapper" -) - -const ( - allExperimentalFunctions = "all" -) - -type experimentalFunctionsMiddleware struct { - next MetricsQueryHandler - limits Limits - logger log.Logger -} - -// newExperimentalFunctionsMiddleware creates a middleware that blocks queries that contain PromQL experimental functions -// that are not enabled for the active tenant(s), allowing us to enable specific functions only for selected tenants. 
-func newExperimentalFunctionsMiddleware(limits Limits, logger log.Logger) MetricsQueryMiddleware { - return MetricsQueryMiddlewareFunc(func(next MetricsQueryHandler) MetricsQueryHandler { - return &experimentalFunctionsMiddleware{ - next: next, - limits: limits, - logger: logger, - } - }) -} - -func (m *experimentalFunctionsMiddleware) Do(ctx context.Context, req MetricsQueryRequest) (Response, error) { - tenantIDs, err := tenant.TenantIDs(ctx) - if err != nil { - return nil, apierror.New(apierror.TypeBadData, err.Error()) - } - - enabledExperimentalFunctions := make(map[string][]string, len(tenantIDs)) - allExperimentalFunctionsEnabled := true - for _, tenantID := range tenantIDs { - enabled := m.limits.EnabledPromQLExperimentalFunctions(tenantID) - enabledExperimentalFunctions[tenantID] = enabled - if len(enabled) == 0 || enabled[0] != allExperimentalFunctions { - allExperimentalFunctionsEnabled = false - } - } - - if allExperimentalFunctionsEnabled { - // If all experimental functions are enabled for all tenants here, we don't need to check the query - // for those functions and can skip this middleware. - return m.next.Do(ctx, req) - } - - expr, err := astmapper.CloneExpr(req.GetParsedQuery()) - if err != nil { - return nil, apierror.New(apierror.TypeBadData, DecorateWithParamName(err, "query").Error()) - } - funcs := containedExperimentalFunctions(expr) - if len(funcs) == 0 { - // This query does not contain any experimental functions, so we can continue to the next middleware. - return m.next.Do(ctx, req) - } - - // Make sure that every used experimental function is enabled for all the tenants here. - for name := range funcs { - for tenantID, enabled := range enabledExperimentalFunctions { - if len(enabled) > 0 && enabled[0] == allExperimentalFunctions { - // If the first item matches the const value of allExperimentalFunctions, then all experimental - // functions are enabled for this tenant. 
- continue - } - if !slices.Contains(enabled, name) { - err := fmt.Errorf("function %q is not enabled for tenant %s", name, tenantID) - return nil, apierror.New(apierror.TypeBadData, DecorateWithParamName(err, "query").Error()) - } - } - } - - // Every used experimental function is enabled for the tenant(s). - return m.next.Do(ctx, req) -} - -// containedExperimentalFunctions returns any PromQL experimental functions used in the query. -func containedExperimentalFunctions(expr parser.Expr) map[string]struct{} { - expFuncNames := map[string]struct{}{} - _ = inspect(expr, func(node parser.Node) error { - switch n := node.(type) { - case *parser.Call: - if parser.Functions[n.Func.Name].Experimental { - expFuncNames[n.Func.Name] = struct{}{} - } - case *parser.AggregateExpr: - if n.Op.IsExperimentalAggregator() { - expFuncNames[n.Op.String()] = struct{}{} - } - } - return nil - }) - return expFuncNames -} diff --git a/pkg/frontend/querymiddleware/experimental_functions_test.go b/pkg/frontend/querymiddleware/experimental_functions_test.go deleted file mode 100644 index 0c7683e3f4f..00000000000 --- a/pkg/frontend/querymiddleware/experimental_functions_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package querymiddleware - -import ( - "testing" - - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/require" -) - -func TestContainedExperimentalFunctions(t *testing.T) { - t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) - parser.EnableExperimentalFunctions = true - - testCases := map[string]struct { - query string - expect []string - }{ - "sum by": { - query: `sum(up) by (namespace)`, - }, - "mad_over_time": { - query: `mad_over_time(up[5m])`, - expect: []string{"mad_over_time"}, - }, - "mad_over_time with sum and by": { - query: `sum(mad_over_time(up[5m])) by (namespace)`, - expect: []string{"mad_over_time"}, - }, - "sort_by_label": { - query: `sort_by_label({__name__=~".+"}, "__name__")`, 
- expect: []string{"sort_by_label"}, - }, - "sort_by_label_desc": { - query: `sort_by_label_desc({__name__=~".+"}, "__name__")`, - expect: []string{"sort_by_label_desc"}, - }, - "limitk": { - query: `limitk by (group) (0, up)`, - expect: []string{"limitk"}, - }, - "limit_ratio": { - query: `limit_ratio(0.5, up)`, - expect: []string{"limit_ratio"}, - }, - "limit_ratio with mad_over_time": { - query: `limit_ratio(0.5, mad_over_time(up[5m]))`, - expect: []string{"limit_ratio", "mad_over_time"}, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - expr, err := parser.ParseExpr(tc.query) - require.NoError(t, err) - var enabled []string - for key := range containedExperimentalFunctions(expr) { - enabled = append(enabled, key) - } - require.ElementsMatch(t, tc.expect, enabled) - }) - } -} diff --git a/pkg/frontend/querymiddleware/limits.go b/pkg/frontend/querymiddleware/limits.go index 6df1d65a753..a1d6cb77800 100644 --- a/pkg/frontend/querymiddleware/limits.go +++ b/pkg/frontend/querymiddleware/limits.go @@ -101,6 +101,9 @@ type Limits interface { // EnabledPromQLExperimentalFunctions returns the names of PromQL experimental functions allowed for the tenant. EnabledPromQLExperimentalFunctions(userID string) []string + // EnabledPromQLExtendedRangeSelectors returns the names of PromQL experimental extended range selectors modifiers allowed for the tenant. + EnabledPromQLExtendedRangeSelectors(userID string) []string + // Prom2RangeCompat returns if Prometheus 2/3 range compatibility fixes are enabled for the tenant. 
Prom2RangeCompat(userID string) bool diff --git a/pkg/frontend/querymiddleware/limits_test.go b/pkg/frontend/querymiddleware/limits_test.go index ec5873ab3c6..b6de154dc47 100644 --- a/pkg/frontend/querymiddleware/limits_test.go +++ b/pkg/frontend/querymiddleware/limits_test.go @@ -675,6 +675,10 @@ func (m multiTenantMockLimits) EnabledPromQLExperimentalFunctions(userID string) return m.byTenant[userID].enabledPromQLExperimentalFunctions } +func (m multiTenantMockLimits) EnabledPromQLExtendedRangeSelectors(userID string) []string { + return m.byTenant[userID].enabledPromQLExtendedRangeSelectors +} + func (m multiTenantMockLimits) Prom2RangeCompat(userID string) bool { return m.byTenant[userID].prom2RangeCompat } @@ -741,6 +745,7 @@ type mockLimits struct { resultsCacheTTLForErrors time.Duration resultsCacheForUnalignedQueryEnabled bool enabledPromQLExperimentalFunctions []string + enabledPromQLExtendedRangeSelectors []string prom2RangeCompat bool blockedQueries []validation.BlockedQuery limitedQueries []validation.LimitedQuery @@ -838,6 +843,10 @@ func (m mockLimits) EnabledPromQLExperimentalFunctions(string) []string { return m.enabledPromQLExperimentalFunctions } +func (m mockLimits) EnabledPromQLExtendedRangeSelectors(string) []string { + return m.enabledPromQLExtendedRangeSelectors +} + func (m mockLimits) Prom2RangeCompat(string) bool { return m.prom2RangeCompat } diff --git a/pkg/frontend/querymiddleware/roundtrip.go b/pkg/frontend/querymiddleware/roundtrip.go index 0ea619eded4..bf2217e1682 100644 --- a/pkg/frontend/querymiddleware/roundtrip.go +++ b/pkg/frontend/querymiddleware/roundtrip.go @@ -264,6 +264,9 @@ func newQueryTripperware( // This enables duration arithmetic https://github.com/prometheus/prometheus/pull/16249. 
parser.ExperimentalDurationExpr = true + // This enables the anchored and smoothed selector modifiers + parser.EnableExtendedRangeSelectors = true + var c cache.Cache if cfg.CacheResults || cfg.cardinalityBasedShardingEnabled() { var err error @@ -483,7 +486,7 @@ func newQueryMiddlewares( // Does not apply to remote read as those are executed remotely and the enabling of PromQL experimental // functions for those are not controlled here. - experimentalFunctionsMiddleware := newExperimentalFunctionsMiddleware(limits, log) + experimentalFunctionsMiddleware := newExperimentalFeaturesMiddleware(limits, log) queryRangeMiddleware = append( queryRangeMiddleware, newInstrumentMiddleware("experimental_functions", metrics), diff --git a/pkg/frontend/querymiddleware/roundtrip_test.go b/pkg/frontend/querymiddleware/roundtrip_test.go index 813df271acc..0c08b734ca6 100644 --- a/pkg/frontend/querymiddleware/roundtrip_test.go +++ b/pkg/frontend/querymiddleware/roundtrip_test.go @@ -628,7 +628,7 @@ func TestMiddlewaresConsistency(t *testing.T) { "splitAndCacheMiddleware", // No time splitting and results cache support. "stepAlignMiddleware", // Not applicable because remote read requests don't take step in account when running in Mimir. "rewriteMiddleware", // No query rewriting support. - "experimentalFunctionsMiddleware", // No blocking for PromQL experimental functions as it is executed remotely. + "experimentalFeaturesMiddleware", // No blocking for PromQL experimental functions as it is executed remotely. "durationsMiddleware", // No duration expressions support. "prom2RangeCompatHandler", // No rewriting Prometheus 2 subqueries to Prometheus 3 "spinOffSubqueriesMiddleware", // This middleware is only for instant queries. 
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index d09070ac668..1b3e1cc3dde 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -197,6 +197,9 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, quer // This enables duration arithmetic https://github.com/prometheus/prometheus/pull/16249. parser.ExperimentalDurationExpr = true + // This enables the anchored and smoothed selector modifiers + parser.EnableExtendedRangeSelectors = true + var eng promql.QueryEngine var streamingEngine *streamingpromql.Engine diff --git a/pkg/streamingpromql/benchmarks/benchmarks.go b/pkg/streamingpromql/benchmarks/benchmarks.go index ee736bb4273..6ed43a5ce3d 100644 --- a/pkg/streamingpromql/benchmarks/benchmarks.go +++ b/pkg/streamingpromql/benchmarks/benchmarks.go @@ -102,6 +102,20 @@ func TestCases(metricSizes []int) []BenchCase { Expr: "rate(nh_X[1m])", Steps: 10000, }, + { + Expr: "rate(a_X[1m] smoothed)", + }, + { + Expr: "rate(a_X[1m] smoothed)", + Steps: 10000, + }, + { + Expr: "rate(a_X[1m] anchored)", + }, + { + Expr: "rate(a_X[1m] anchored)", + Steps: 10000, + }, //// Holt-Winters and long ranges. //{ // Expr: "holt_winters(a_X[1d], 0.3, 0.3)", diff --git a/pkg/streamingpromql/benchmarks/comparison_test.go b/pkg/streamingpromql/benchmarks/comparison_test.go index 395423917ce..20f35ef461a 100644 --- a/pkg/streamingpromql/benchmarks/comparison_test.go +++ b/pkg/streamingpromql/benchmarks/comparison_test.go @@ -7,6 +7,7 @@ package benchmarks import ( "context" + "github.com/prometheus/prometheus/promql/parser" "math" "os" "testing" @@ -38,6 +39,10 @@ import ( // This is based on the benchmarks from https://github.com/prometheus/prometheus/blob/main/promql/bench_test.go. 
func BenchmarkQuery(b *testing.B) { + extendedRangeSelectors := parser.EnableExtendedRangeSelectors + parser.EnableExtendedRangeSelectors = true + defer func() { parser.EnableExtendedRangeSelectors = extendedRangeSelectors }() + // Important: the setup below must remain in sync with the setup done in tools/benchmark-query-engine. q := createBenchmarkQueryable(b, MetricSizes) cases := TestCases(MetricSizes) @@ -92,6 +97,10 @@ func BenchmarkQuery(b *testing.B) { } func TestBothEnginesReturnSameResultsForBenchmarkQueries(t *testing.T) { + extendedRangeSelectors := parser.EnableExtendedRangeSelectors + parser.EnableExtendedRangeSelectors = true + defer func() { parser.EnableExtendedRangeSelectors = extendedRangeSelectors }() + metricSizes := []int{1, 100} // Don't bother with 2000 series test here: these test cases take a while and they're most interesting as benchmarks, not correctness tests. q := createBenchmarkQueryable(t, metricSizes) cases := TestCases(metricSizes) diff --git a/pkg/streamingpromql/engine_test.go b/pkg/streamingpromql/engine_test.go index 14dcc7e3b0a..909261ce040 100644 --- a/pkg/streamingpromql/engine_test.go +++ b/pkg/streamingpromql/engine_test.go @@ -59,6 +59,7 @@ func init() { types.EnableManglingReturnedSlices = true parser.ExperimentalDurationExpr = true parser.EnableExperimentalFunctions = true + parser.EnableExtendedRangeSelectors = true // Set a tracer provider with in memory span exporter so we can check the spans later. 
otel.SetTracerProvider( @@ -221,7 +222,6 @@ func TestOurTestCases(t *testing.T) { return mimirEngine, prometheusEngine } - opts := NewTestEngineOpts() mimirEngine, prometheusEngine := makeEngines(t, opts) @@ -4468,6 +4468,393 @@ func TestEngine_RegisterNodeMaterializer(t *testing.T) { require.EqualError(t, engine.RegisterNodeMaterializer(nodeType, materializer), "materializer for node type 1234 already registered", "should fail to register materializer again if already registered") } +// TestExtendedRangeSelectors has tests specific to the anchored and smoothed range modifiers. +// The results can have points which are not aligned to the step interval, and as such creating promql *.test scripts which inspect the raw range result is not possible. +func TestExtendedRangeSelectors(t *testing.T) { + storage := promqltest.LoadedStorage(t, ` + load 1m + metric 1 2 _ 4 5 + another_metric{id="1"} 1+1x4 1+1x4 + another_metric{id="2"} 3 2+2x9 + another_metric{id="3"} 5+3x2 3+3x6 + `) + t.Cleanup(func() { storage.Close() }) + + tc := []struct { + query string + t time.Time + expected promql.Matrix + }{ + { + // There is no values within the range of 1m-2m (left-open / right-closed). + // Because of that no back-filling from the look-back is used + query: "metric[1m] anchored", + t: time.Unix(120, 0), + expected: types.GetMatrix(0), + }, + + { + // The range is 59s - 2m + // The value of 1 (T=0) is picked up in the look-back <= 59s + // The value of 2 (T=1m) is picked up as it's in this time range + // The value of 2 (T=1m) is re-used for the value at the end of the range + query: "metric[1m1s] anchored", + t: time.Unix(120, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 59000}, {F: 2, T: 60000}, {F: 2, T: 120000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + + { + // There is no values within the range of 1m-2m (left-open / right-closed). 
+ // Because of that no back-filling from the look-back is used + query: "metric[59s] anchored", + t: time.Unix(120, 0), + expected: types.GetMatrix(0), + }, + + { + // Without the anchored modifier, these range queries only return a single point + query: "another_metric[1m]", + t: time.Unix(90, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 60000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "1"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 60000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "2"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 8, T: 60000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "3"), + }, + }, + }, + + { + query: "another_metric[1m] anchored", + t: time.Unix(90, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 30000}, {F: 2, T: 60000}, {F: 2, T: 90000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "1"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 30000}, {F: 2, T: 60000}, {F: 2, T: 90000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "2"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 5, T: 30000}, {F: 8, T: 60000}, {F: 8, T: 90000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "3"), + }, + }, + }, + + { + query: "another_metric[1m] anchored", + t: time.Unix(60*3, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 120000}, {F: 4, T: 180000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "1"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 120000}, {F: 6, T: 180000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "2"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 11, T: 120000}, {F: 3, T: 180000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "3"), + }, + }, + }, + + { + query: "another_metric[1m] 
anchored", + t: time.Unix(60*3-1, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 119000}, {F: 3, T: 120000}, {F: 3, T: 179000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "1"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 119000}, {F: 4, T: 120000}, {F: 4, T: 179000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "2"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 8, T: 119000}, {F: 11, T: 120000}, {F: 11, T: 179000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "3"), + }, + }, + }, + + { + query: "another_metric[1m] anchored", + t: time.Unix(60*3+1, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 121000}, {F: 4, T: 180000}, {F: 4, T: 181000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "1"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 121000}, {F: 6, T: 180000}, {F: 6, T: 181000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "2"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 11, T: 121000}, {F: 3, T: 180000}, {F: 3, T: 181000}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "3"), + }, + }, + }, + + { + query: "another_metric[1m] smoothed", + t: time.Unix(90, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1.5, T: 30000}, {T: 60000, F: 2}, {T: 90000, F: 2.5}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "1"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2.5, T: 30000}, {T: 60000, F: 2}, {T: 90000, F: 3}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "2"), + }, + promql.Series{ + Floats: []promql.FPoint{{F: 6.5, T: 30000}, {T: 60000, F: 8}, {T: 90000, F: 9.5}}, + Metric: labels.FromStrings("__name__", "another_metric", "id", "3"), + }, + }, + }, + } + + for _, tc := range tc { + t.Run(tc.query, func(t *testing.T) { + + opts := NewTestEngineOpts() + 
planner, err := NewQueryPlanner(opts, NewMaximumSupportedVersionQueryPlanVersionProvider()) + require.NoError(t, err) + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), planner) + require.NoError(t, err) + + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, tc.query, tc.t) + require.NoError(t, err) + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + require.Equal(t, tc.expected, res.Value) + }) + } +} + +// TestExtendedRangeSelectorsIrregular has tests specific to the anchored and smoothed range modifiers. +// The results can have points which are not aligned to the step interval, and as such creating promql *.test scripts which inspect the raw range result is not possible. +// These tests also cover the anchored and smoothed errors. These errors are returned during the planning phase, rather than the execution phase so the *.test promql test harness does not handle this correctly. +func TestExtendedRangeSelectorsIrregular(t *testing.T) { + storage := promqltest.LoadedStorage(t, ` + load 10s + metric 1+1x10 + withreset 1+1x4 1+1x5 + notregular 0 5 100 2 8 + nans 1 2 3 NaN -NaN 4 5 6 + `) + t.Cleanup(func() { storage.Close() }) + + tc := []struct { + query string + t time.Time + expected promql.Matrix + error string + }{ + + { + query: "metric[10s] smoothed", + t: time.Unix(10, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "metric[10s] smoothed", + t: time.Unix(15, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1.5, T: 5000}, {F: 2, T: 10000}, {F: 2.5, T: 15000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "metric[10s] smoothed", + t: time.Unix(5, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: -5000}, {F: 1, T: 0}, {F: 1.5, T: 5000}}, + 
Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "metric[10s] smoothed", + t: time.Unix(105, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10.5, T: 95000}, {F: 11, T: 100000}, {F: 11, T: 105000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "withreset[10s] smoothed", + t: time.Unix(45, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 4.5, T: 35000}, {F: 5, T: 40000}, {F: 3, T: 45000}}, + Metric: labels.FromStrings("__name__", "withreset"), + }, + }, + }, + { + query: "metric[10s] anchored", + t: time.Unix(10, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "metric[10s] anchored", + t: time.Unix(15, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 5000}, {F: 2, T: 10000}, {F: 2, T: 15000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "metric[10s] anchored", + t: time.Unix(5, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: -5000}, {F: 1, T: 0}, {F: 1, T: 5000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "metric[10s] anchored", + t: time.Unix(105, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: 95000}, {F: 11, T: 100000}, {F: 11, T: 105000}}, + Metric: labels.FromStrings("__name__", "metric"), + }, + }, + }, + { + query: "withreset[10s] anchored", + t: time.Unix(45, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 35000}, {F: 5, T: 40000}, {F: 5, T: 45000}}, + Metric: labels.FromStrings("__name__", "withreset"), + }, + }, + }, + { + query: "notregular[20s] smoothed", + t: time.Unix(30, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 5, T: 10000}, {F: 100, T: 20000}, 
{F: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "notregular"), + }, + }, + }, + { + query: "notregular[20s] anchored", + t: time.Unix(30, 0), + expected: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 5, T: 10000}, {F: 100, T: 20000}, {F: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "notregular"), + }, + }, + }, + + { + query: "deriv(notregular[20s] anchored)", + t: time.Unix(30, 0), + error: "anchored modifier can only be used with: changes, delta, increase, rate, resets - not with deriv", + }, + + { + query: "max_over_time(notregular[20s] anchored)", + t: time.Unix(30, 0), + error: "anchored modifier can only be used with: changes, delta, increase, rate, resets - not with max_over_time", + }, + + { + query: "predict_linear(notregular[20s] anchored, 4)", + t: time.Unix(30, 0), + error: "anchored modifier can only be used with: changes, delta, increase, rate, resets - not with predict_linear", + }, + + { + query: "deriv(notregular[20s] smoothed)", + t: time.Unix(30, 0), + error: "smoothed modifier can only be used with: delta, increase, rate - not with deriv", + }, + + { + query: "changes(notregular[20s] smoothed)", + t: time.Unix(30, 0), + error: "smoothed modifier can only be used with: delta, increase, rate - not with changes", + }, + + { + query: "resets(notregular[20s] smoothed)", + t: time.Unix(30, 0), + error: "smoothed modifier can only be used with: delta, increase, rate - not with resets", + }, + } + + for _, tc := range tc { + t.Run(tc.query, func(t *testing.T) { + + opts := NewTestEngineOpts() + planner, err := NewQueryPlanner(opts, NewMaximumSupportedVersionQueryPlanVersionProvider()) + require.NoError(t, err) + + engine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), planner) + require.NoError(t, err) + + qry, err := engine.NewInstantQuery(context.Background(), storage, nil, tc.query, tc.t) + if len(tc.error) > 0 { + require.ErrorContains(t, err, tc.error) + } else { + 
require.NoError(t, err) + } + if err != nil { + return + } + res := qry.Exec(context.Background()) + require.NoError(t, res.Err) + require.Equal(t, tc.expected, res.Value) + + }) + } +} + type dummyMaterializer struct{} func (d dummyMaterializer) Materialize(n planning.Node, materializer *planning.Materializer, timeRange types.QueryTimeRange, params *planning.OperatorParameters) (planning.OperatorFactory, error) { diff --git a/pkg/streamingpromql/operators/functions/rate_increase.go b/pkg/streamingpromql/operators/functions/rate_increase.go index 1dc924392d5..ae96d8071c6 100644 --- a/pkg/streamingpromql/operators/functions/rate_increase.go +++ b/pkg/streamingpromql/operators/functions/rate_increase.go @@ -57,7 +57,7 @@ func rate(isRate bool) RangeVectorStepFunction { if fCount >= 2 { // TODO: just pass step here? (and below) - val := floatRate(isRate, fCount, fHead, fTail, step.RangeStart, step.RangeEnd, rangeSeconds) + val := floatRate(isRate, fCount, fHead, fTail, step.RangeStart, step.RangeEnd, rangeSeconds, step.Smoothed || step.Anchored, step.SmoothedBasisForHeadPoint, step.SmoothedBasisForTailPoint) return val, true, nil, nil } @@ -184,7 +184,7 @@ func histogramRate(isRate bool, hCount int, hHead []promql.HPoint, hTail []promq return val, err } -func floatRate(isRate bool, fCount int, fHead []promql.FPoint, fTail []promql.FPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64) float64 { +func floatRate(isRate bool, fCount int, fHead []promql.FPoint, fTail []promql.FPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64, smoothedOrAnchored bool, smoothedBasisForHeadPoint, smoothedBasisForTailPoint *promql.FPoint) float64 { firstPoint := fHead[0] fHead = fHead[1:] @@ -195,6 +195,31 @@ func floatRate(isRate bool, fCount int, fHead []promql.FPoint, fTail []promql.FP lastPoint = fHead[len(fHead)-1] } + if smoothedOrAnchored { + // We only need to consider samples exactly within the range as the pre-calculated smoothedBasisForHeadPoint & 
smoothedBasisForTailPoint have already handled the resets at boundaries. + // For smoothed rate/increase range queries, the interpolated points at the range boundaries are calculated differently to compensate for counter values. + // These alternate boundary points have been pre-calculated by the range vector selector. + // Note that the rate() which calls this floatRate() has already tested that fCount >= 2, so we should not have issues pruning the head and tail of these slices. + + if smoothedBasisForHeadPoint != nil { + firstPoint = *smoothedBasisForHeadPoint + } + + if smoothedBasisForTailPoint != nil { + lastPoint = *smoothedBasisForTailPoint + } + + // We are essentially replacing the last point in the slices with the smoothed tail point + // We could achieve the same thing by setting the last value.F in the slice to the smoothedBasisForTailPoint.F, + // and not pruning the slice. This would then avoid the need for the extra delta addition after the accumulations. + // However, we probably do not want to edit values in these slices. + if len(fTail) > 0 { + fTail = fTail[:len(fTail)-1] + } else { + fHead = fHead[:len(fHead)-1] + } + } + delta := lastPoint.F - firstPoint.F previousValue := firstPoint.F @@ -212,7 +237,12 @@ func floatRate(isRate bool, fCount int, fHead []promql.FPoint, fTail []promql.FP accumulate(fHead) accumulate(fTail) - val := calculateFloatRate(true, isRate, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount) + // Compensate for the pruning of the last value above + if smoothedOrAnchored && lastPoint.F < previousValue { + delta += previousValue + } + + val := calculateFloatRate(true, isRate, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount, smoothedOrAnchored) return val } @@ -260,7 +290,17 @@ func calculateHistogramRate(isCounter, isRate bool, rangeStart, rangeEnd int64, // This is based on extrapolatedRate from promql/functions.go. 
// https://github.com/prometheus/prometheus/pull/13725 has a good explanation of the intended behaviour here. -func calculateFloatRate(isCounter, isRate bool, rangeStart, rangeEnd int64, rangeSeconds float64, firstPoint, lastPoint promql.FPoint, delta float64, count int) float64 { +func calculateFloatRate(isCounter, isRate bool, rangeStart, rangeEnd int64, rangeSeconds float64, firstPoint, lastPoint promql.FPoint, delta float64, count int, smoothedOrAnchored bool) float64 { + + if smoothedOrAnchored { + // This is a special case where the points have already been aligned and interpolated (smoothed) to the range boundaries. + // Combined with the delta calculations in floatRate(), this is functionally equivalent to extendedRate() in promql/functions.go + if isRate { + return delta / rangeSeconds + } + return delta + } + durationToStart := float64(firstPoint.T-rangeStart) / 1000 durationToEnd := float64(rangeEnd-lastPoint.T) / 1000 @@ -336,7 +376,7 @@ func delta(step *types.RangeVectorStepData, _ []types.ScalarData, _ types.QueryT rangeSeconds := float64(step.RangeEnd-step.RangeStart) / 1000 if fCount >= 2 { - val := floatDelta(fCount, fHead, fTail, step.RangeStart, step.RangeEnd, rangeSeconds) + val := floatDelta(fCount, fHead, fTail, step.RangeStart, step.RangeEnd, rangeSeconds, step.Anchored || step.Smoothed) return val, true, nil, nil } @@ -352,7 +392,7 @@ func delta(step *types.RangeVectorStepData, _ []types.ScalarData, _ types.QueryT return 0, false, nil, nil } -func floatDelta(fCount int, fHead []promql.FPoint, fTail []promql.FPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64) float64 { +func floatDelta(fCount int, fHead []promql.FPoint, fTail []promql.FPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64, anchoredOrSmoothed bool) float64 { firstPoint := fHead[0] var lastPoint promql.FPoint @@ -363,7 +403,7 @@ func floatDelta(fCount int, fHead []promql.FPoint, fTail []promql.FPoint, rangeS } delta := lastPoint.F - firstPoint.F - return 
calculateFloatRate(false, false, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount) + return calculateFloatRate(false, false, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount, anchoredOrSmoothed) } func histogramDelta(hCount int, hHead []promql.HPoint, hTail []promql.HPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64, emitAnnotation types.EmitAnnotationFunc) (*histogram.FloatHistogram, error) { diff --git a/pkg/streamingpromql/operators/selectors/extend_range_vector.go b/pkg/streamingpromql/operators/selectors/extend_range_vector.go new file mode 100644 index 00000000000..9651345f3bb --- /dev/null +++ b/pkg/streamingpromql/operators/selectors/extend_range_vector.go @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/main/promql/engine.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Prometheus Authors + +package selectors + +import ( + "github.com/prometheus/prometheus/promql" + + "github.com/grafana/mimir/pkg/streamingpromql/types" + "github.com/grafana/mimir/pkg/util/limiter" +) + +// extendRangeVectorPoints will return a slice of points which has been adjusted to have anchored/smoothed points on the bounds of the given range. +// This is used with the anchored/smoothed range query modifiers. 
+// This implementation is based on extendFloats() found in promql/engine.go +func extendRangeVectorPoints(it *types.FPointRingBufferViewIterator, rangeStart, rangeEnd int64, smoothed bool, memoryConsumptionTracker *limiter.MemoryConsumptionTracker) ([]promql.FPoint, *promql.FPoint, *promql.FPoint, error) { + + // We need a new buffer to store the extended points + // The caller is responsible for releasing this slice back to the slices pool + buff, err := types.FPointSlicePool.Get(it.Count()+2, memoryConsumptionTracker) + if err != nil { + return nil, nil, nil, err + } + + // Find the last point before the rangeStart, or the first point >= rangeStart + first := it.Seek(rangeStart) + + // Use this first value as the range boundary value + buff = append(buff, promql.FPoint{T: rangeStart, F: first.F}) + + // Accumulate the points <= rangeEnd into the buffer. + // Note - if the first.T > rangeStart, it will also be accumulated into buff as the 2nd point in the buffer. + buff = it.CopyRemainingPointsTo(rangeEnd, buff) + last := it.At() + + if last.T != rangeEnd { + // Use the last point >= rangeEnd, or the point immediately preceding as the value for the end boundary + buff = append(buff, promql.FPoint{T: rangeEnd, F: last.F}) + } + + // Smoothing has 2 special cases. + // Firstly, the values on the boundaries are replaced with interpolated values - thereby smoothing the value to reflect the time of the point before/after the boundary + // Secondly, if the vector will be used in a rate/increase function then the boundary points must be calculated differently to consider the value as a counter. + // These alternate points will be stored alongside the resulting vector so that the rate/increase function handler can utilise these values.
+ var smoothedHead *promql.FPoint + var smoothedTail *promql.FPoint + if smoothed && len(buff) > 1 { + if first.T < rangeStart { + buff[0].F = interpolate(first, buff[1], rangeStart, false, true) + smoothedHead = &promql.FPoint{T: rangeStart, F: interpolate(first, buff[1], rangeStart, true, true)} + } + + if last.T > rangeEnd { + buff[len(buff)-1].F = interpolate(buff[len(buff)-2], last, rangeEnd, false, false) + smoothedTail = &promql.FPoint{T: rangeEnd, F: interpolate(buff[len(buff)-2], last, rangeEnd, true, false)} + } + } + + return buff, smoothedHead, smoothedTail, nil +} + +// interpolate performs linear interpolation between two points. +// If isCounter is true and there is a counter reset: +// - on the left edge, it sets the value to 0. +// - on the right edge, it adds the left value to the right value. +// It then calculates the interpolated value at the given timestamp. +// This has been adapted from interpolate() in promql/functions.go +func interpolate(p1, p2 promql.FPoint, t int64, isCounter, leftEdge bool) float64 { + y1 := p1.F + y2 := p2.F + + if isCounter && y2 < y1 { + if leftEdge { + y1 = 0 + } else { + y2 += y1 + } + } + + return y1 + (y2-y1)*float64(t-p1.T)/float64(p2.T-p1.T) +} diff --git a/pkg/streamingpromql/operators/selectors/instant_vector_selector.go b/pkg/streamingpromql/operators/selectors/instant_vector_selector.go index 59a4d5424c8..0bbee85dd6f 100644 --- a/pkg/streamingpromql/operators/selectors/instant_vector_selector.go +++ b/pkg/streamingpromql/operators/selectors/instant_vector_selector.go @@ -7,6 +7,7 @@ package selectors import ( "context" + "errors" "fmt" "math" @@ -114,10 +115,15 @@ func (v *InstantVectorSelector) NextSeries(ctx context.Context) (types.InstantVe if valueType == chunkenc.ValNone || t > ts { var ok bool + + // Keep a copy of this point for use with the smoothed case below.
+ right := promql.FPoint{T: t, F: f} + t, f, h, ok = v.memoizedIterator.PeekPrev() if !ok || t <= ts-v.Selector.LookbackDelta.Milliseconds() { continue } + if h != nil { if t == lastHistogramT && lastHistogram != nil { // Reuse exactly the same FloatHistogram as last time, don't bother creating another FloatHistogram yet. @@ -125,6 +131,14 @@ func (v *InstantVectorSelector) NextSeries(ctx context.Context) (types.InstantVe // to AtFloatHistogram, so if we're going to return this histogram, we'll make a copy below. h = lastHistogram } + } else { + // If this query uses the 'smoothed' modifier, we look back within the look-back delta + // to find the most recent float value before the requested timestamp. + // If both a previous and a next point are found, the value at the requested time + // is computed as the linear interpolation between those two points. + if v.Selector.Smoothed && valueType == chunkenc.ValFloat && right.T <= ts+v.Selector.LookbackDelta.Milliseconds() { + f = f + (right.F-f)*float64(ts-t)/float64(right.T-t) + } } } @@ -142,6 +156,11 @@ func (v *InstantVectorSelector) NextSeries(ctx context.Context) (types.InstantVe // PeekPrev will set the histogram to nil, or the value to 0 if the other type exists. // So check if histograms is nil first. If we don't have a histogram, then we should have a value and vice-versa. if h != nil { + + if v.Selector.Smoothed { + return types.InstantVectorSeriesData{}, errors.New("smoothed and anchored modifiers do not work with native histograms") + } + // Only create the slice once we know the series is a histogram or not. // (It is possible to over-allocate in the case where we have both floats and histograms, but that won't be common). 
if len(data.Histograms) == 0 { diff --git a/pkg/streamingpromql/operators/selectors/range_vector_selector.go b/pkg/streamingpromql/operators/selectors/range_vector_selector.go index 76767364968..8b8e6886368 100644 --- a/pkg/streamingpromql/operators/selectors/range_vector_selector.go +++ b/pkg/streamingpromql/operators/selectors/range_vector_selector.go @@ -7,6 +7,7 @@ package selectors import ( "context" + "errors" "fmt" "github.com/prometheus/prometheus/model/value" @@ -22,23 +23,40 @@ type RangeVectorSelector struct { Selector *Selector Stats *types.QueryStats - rangeMilliseconds int64 - chunkIterator chunkenc.Iterator - nextStepT int64 - floats *types.FPointRingBuffer - histograms *types.HPointRingBuffer - stepData *types.RangeVectorStepData // Retain the last step data instance we used to avoid allocating it for every step. + rangeMilliseconds int64 + chunkIterator chunkenc.Iterator + nextStepT int64 + floats *types.FPointRingBuffer + extendedRangeFloats *types.FPointRingBuffer // A buffer we use to create views for smoothed/anchored extended ranges which have added/modified points from the original floats buffer + histograms *types.HPointRingBuffer + stepData *types.RangeVectorStepData // Retain the last step data instance we used to avoid allocating it for every step. 
+ extendedRangeView *types.FPointRingBufferView + extendedRangeIterator *types.FPointRingBufferViewIterator + + memoryConsumptionTracker *limiter.MemoryConsumptionTracker + anchored bool // The anchored modifier has been used for this range query + smoothed bool // The smoothed modifier has been used for this range query } var _ types.RangeVectorOperator = &RangeVectorSelector{} -func NewRangeVectorSelector(selector *Selector, memoryConsumptionTracker *limiter.MemoryConsumptionTracker) *RangeVectorSelector { - return &RangeVectorSelector{ - Selector: selector, - floats: types.NewFPointRingBuffer(memoryConsumptionTracker), - histograms: types.NewHPointRingBuffer(memoryConsumptionTracker), - stepData: &types.RangeVectorStepData{}, +func NewRangeVectorSelector(selector *Selector, memoryConsumptionTracker *limiter.MemoryConsumptionTracker, anchored bool, smoothed bool) *RangeVectorSelector { + + rangeVectorSelector := RangeVectorSelector{ + Selector: selector, + floats: types.NewFPointRingBuffer(memoryConsumptionTracker), + histograms: types.NewHPointRingBuffer(memoryConsumptionTracker), + stepData: &types.RangeVectorStepData{Anchored: anchored, Smoothed: smoothed}, // Include the smoothed/anchored context to the step data as functions such as rate/increase require this + anchored: anchored, + smoothed: smoothed, + memoryConsumptionTracker: memoryConsumptionTracker, + } + + if anchored || smoothed { + rangeVectorSelector.extendedRangeFloats = types.NewFPointRingBuffer(memoryConsumptionTracker) } + + return &rangeVectorSelector } func (m *RangeVectorSelector) ExpressionPosition() posrange.PositionRange { @@ -62,6 +80,9 @@ func (m *RangeVectorSelector) NextSeries(ctx context.Context) error { m.nextStepT = m.Selector.TimeRange.StartT m.floats.Reset() m.histograms.Reset() + if m.extendedRangeFloats != nil { + m.extendedRangeFloats.Reset() + } return nil } @@ -82,17 +103,79 @@ func (m *RangeVectorSelector) NextStepSamples(ctx context.Context) (*types.Range // Apply offset 
after adjusting for timestamp from @ modifier. rangeEnd = rangeEnd - m.Selector.Offset rangeStart := rangeEnd - m.rangeMilliseconds + + // Take a copy of the original range - the smoothed/anchored modifiers will change these + originalRangeStart := rangeStart + originalRangeEnd := rangeEnd + + // When the smoothed/anchored modifiers are used, the selector (fillBuffer) will return a wider range of points. + // Modify the range boundaries accordingly so that we do not discard these extended points. + if m.anchored { + rangeStart -= m.Selector.LookbackDelta.Milliseconds() + } else if m.smoothed { + rangeStart -= m.Selector.LookbackDelta.Milliseconds() + rangeEnd += m.Selector.LookbackDelta.Milliseconds() + } + m.floats.DiscardPointsAtOrBefore(rangeStart) m.histograms.DiscardPointsAtOrBefore(rangeStart) + m.stepData.SmoothedBasisForHeadPoint = nil + m.stepData.SmoothedBasisForTailPoint = nil + // Fill the buffer with an extended range of points (smoothed/anchored) - these will be filtered out in the extendRangeVectorPoints() below if err := m.fillBuffer(m.floats, m.histograms, rangeStart, rangeEnd); err != nil { return nil, err } - m.stepData.Floats = m.floats.ViewUntilSearchingBackwards(rangeEnd, m.stepData.Floats) + if m.anchored || m.smoothed { + m.extendedRangeFloats.Release() + + // Histograms are not supported for these modified range queries + if m.histograms.ViewUntilSearchingForwards(rangeEnd, nil).Count() > 0 { + return nil, errors.New("smoothed and anchored modifiers do not work with native histograms") + } + + // Note the extended range end is used since smoothed will have extended this + m.extendedRangeView = m.floats.ViewUntilSearchingForwards(rangeEnd, m.extendedRangeView) + + var buff []promql.FPoint + var err error + var smoothedHead, smoothedTail *promql.FPoint + + if m.extendedRangeView.Any() { + // ignore ok as we already tested that we have points + lastInView, _ := m.extendedRangeView.Last() + + // No points were found within the original range. 
+ // If we only find points prior to the start of the original range then no points are returned. + if lastInView.T > originalRangeStart { + m.extendedRangeIterator = m.extendedRangeView.Iterator(m.extendedRangeIterator) + buff, smoothedHead, smoothedTail, err = extendRangeVectorPoints(m.extendedRangeIterator, originalRangeStart, originalRangeEnd, m.smoothed, m.memoryConsumptionTracker) + if err != nil { + return nil, err + } + } + } + + if buff != nil { + err := m.extendedRangeFloats.Use(buff) + if err != nil { + return nil, err + } + } + + // Store the smoothed points in the range step data result so that consumers of this data can reference these values + // without having to re-calculate off the original points. Re-use the view + m.stepData.Floats = m.extendedRangeFloats.ViewAll(m.stepData.Floats) + m.stepData.SmoothedBasisForHeadPoint = smoothedHead + m.stepData.SmoothedBasisForTailPoint = smoothedTail + } else { + m.stepData.Floats = m.floats.ViewUntilSearchingBackwards(rangeEnd, m.stepData.Floats) + } + m.stepData.Histograms = m.histograms.ViewUntilSearchingBackwards(rangeEnd, m.stepData.Histograms) - m.stepData.RangeStart = rangeStart - m.stepData.RangeEnd = rangeEnd + m.stepData.RangeStart = originalRangeStart // important to return the original range start so that functions like rate() can determine the range duration regardless of smoothed / anchored + m.stepData.RangeEnd = originalRangeEnd m.Stats.IncrementSamplesAtTimestamp(m.stepData.StepT, int64(m.stepData.Floats.Count())+m.stepData.Histograms.EquivalentFloatSampleCount()) @@ -166,6 +249,9 @@ func (m *RangeVectorSelector) Finalize(ctx context.Context) error { func (m *RangeVectorSelector) Close() { m.Selector.Close() m.floats.Close() + if m.extendedRangeFloats != nil { + m.extendedRangeFloats.Close() + } m.histograms.Close() m.chunkIterator = nil } diff --git a/pkg/streamingpromql/operators/selectors/selector.go b/pkg/streamingpromql/operators/selectors/selector.go index 95e29d41ea0..6236f73969c 
100644 --- a/pkg/streamingpromql/operators/selectors/selector.go +++ b/pkg/streamingpromql/operators/selectors/selector.go @@ -5,7 +5,6 @@ package selectors import ( "context" "errors" - "fmt" "sync" "time" @@ -36,6 +35,10 @@ type Selector struct { // Set for range vector selectors, otherwise 0. Range time.Duration + // When these range selector modifiers are used the start/end timestamps are adjusted to query for a larger range of points + Anchored bool + Smoothed bool + MemoryConsumptionTracker *limiter.MemoryConsumptionTracker querier storage.Querier @@ -119,7 +122,7 @@ func (s *Selector) loadSeriesSet(ctx context.Context, matchers types.Matchers) e return errors.New("should not call Selector.loadSeriesSet() multiple times") } - startTimestamp, endTimestamp := ComputeQueriedTimeRange(s.TimeRange, s.Timestamp, s.Range, s.Offset, s.LookbackDelta) + startTimestamp, endTimestamp := ComputeQueriedTimeRange(s.TimeRange, s.Timestamp, s.Range, s.Offset, s.LookbackDelta, s.Anchored, s.Smoothed) hints := &storage.SelectHints{ Start: startTimestamp, @@ -154,11 +157,7 @@ func (s *Selector) loadSeriesSet(ctx context.Context, matchers types.Matchers) e return nil } -func ComputeQueriedTimeRange(timeRange types.QueryTimeRange, timestamp *int64, selectorRange time.Duration, offset int64, lookbackDelta time.Duration) (int64, int64) { - if lookbackDelta != 0 && selectorRange != 0 { - panic(fmt.Sprintf("both lookback delta (%s) and selector range (%s) are non-zero", lookbackDelta, selectorRange)) - } - +func ComputeQueriedTimeRange(timeRange types.QueryTimeRange, timestamp *int64, selectorRange time.Duration, offset int64, lookbackDelta time.Duration, anchored bool, smoothed bool) (int64, int64) { startTimestamp := timeRange.StartT endTimestamp := timeRange.EndT @@ -173,6 +172,14 @@ func ComputeQueriedTimeRange(timeRange types.QueryTimeRange, timestamp *int64, s startTimestamp = startTimestamp - lookbackDelta.Milliseconds() - rangeMilliseconds - offset + 1 // +1 to exclude samples 
on the lower boundary of the range (queriers work with closed intervals, we use left-open). endTimestamp = endTimestamp - offset + if anchored || smoothed { + startTimestamp -= selectorRange.Milliseconds() + lookbackDelta.Milliseconds() + } + + if smoothed { + endTimestamp += lookbackDelta.Milliseconds() + } + return startTimestamp, endTimestamp } diff --git a/pkg/streamingpromql/optimize/plan/eliminate_deduplicate_and_merge_test.go b/pkg/streamingpromql/optimize/plan/eliminate_deduplicate_and_merge_test.go index 53795b8d6e8..52b9040c403 100644 --- a/pkg/streamingpromql/optimize/plan/eliminate_deduplicate_and_merge_test.go +++ b/pkg/streamingpromql/optimize/plan/eliminate_deduplicate_and_merge_test.go @@ -1007,10 +1007,12 @@ func runTestCasesWithDelayedNameRemovalDisabled(t *testing.T, globPattern string types.EnableManglingReturnedSlices = true parser.ExperimentalDurationExpr = true parser.EnableExperimentalFunctions = true + parser.EnableExtendedRangeSelectors = true t.Cleanup(func() { types.EnableManglingReturnedSlices = false parser.ExperimentalDurationExpr = false parser.EnableExperimentalFunctions = false + parser.EnableExtendedRangeSelectors = false }) testdataFS := os.DirFS("../../testdata") diff --git a/pkg/streamingpromql/planning.go b/pkg/streamingpromql/planning.go index 063d60c6106..2722bb481aa 100644 --- a/pkg/streamingpromql/planning.go +++ b/pkg/streamingpromql/planning.go @@ -6,6 +6,9 @@ import ( "context" "errors" "fmt" + "maps" + "slices" + "strings" "time" "github.com/go-kit/log" @@ -29,6 +32,14 @@ import ( "github.com/grafana/mimir/pkg/util/spanlogger" ) +var smoothedIncompatibleFunctionPrefix string +var anchoredIncompatibleFunctionPrefix string + +func init() { + smoothedIncompatibleFunctionPrefix = fmt.Sprintf("smoothed modifier can only be used with: %s - not with ", sortImplode(promql.SmoothedSafeFunctions)) + anchoredIncompatibleFunctionPrefix = fmt.Sprintf("anchored modifier can only be used with: %s - not with ", 
sortImplode(promql.AnchoredSafeFunctions)) +} + type QueryPlanner struct { activeQueryTracker QueryTracker noStepSubqueryIntervalFn func(rangeMillis int64) int64 @@ -377,6 +388,7 @@ func (p *QueryPlanner) nodeFromExpr(expr parser.Expr) (planning.Node, error) { Timestamp: core.TimeFromTimestamp(expr.Timestamp), Offset: expr.OriginalOffset, ExpressionPosition: core.PositionRangeFrom(expr.PositionRange()), + Smoothed: expr.Smoothed, // Note that we deliberately do not propagate SkipHistogramBuckets from the expression here. // This is done in the skip histogram buckets optimization pass, after common subexpression elimination is applied, // to simplify the logic in the common subexpression elimination optimization pass. Otherwise it would have to deal @@ -397,6 +409,8 @@ func (p *QueryPlanner) nodeFromExpr(expr parser.Expr) (planning.Node, error) { Offset: vs.OriginalOffset, Range: expr.Range, ExpressionPosition: core.PositionRangeFrom(expr.PositionRange()), + Anchored: vs.Anchored, + Smoothed: vs.Smoothed, // Note that we deliberately do not propagate SkipHistogramBuckets from the expression here. See the explanation above. 
}, }, nil @@ -501,6 +515,19 @@ func (p *QueryPlanner) nodeFromExpr(expr parser.Expr) (planning.Node, error) { if err != nil { return nil, err } + matrixSelector, ok := node.(*core.MatrixSelector) + if ok && matrixSelector.Anchored { + _, supported := promql.AnchoredSafeFunctions[expr.Func.Name] + if !supported { + return nil, getAnchoredIncompatibleFunctionError(expr.Func.Name) + } + } + if ok && matrixSelector.Smoothed { + _, supported := promql.SmoothedSafeFunctions[expr.Func.Name] + if !supported { + return nil, getSmoothedIncompatibleFunctionError(expr.Func.Name) + } + } args = append(args, node) } @@ -817,3 +844,16 @@ type staticQueryPlanVersionProvider struct { func (s *staticQueryPlanVersionProvider) GetMaximumSupportedQueryPlanVersion(ctx context.Context) (planning.QueryPlanVersion, error) { return s.version, nil } + +func getSmoothedIncompatibleFunctionError(fncName string) error { + return fmt.Errorf("%s%s", smoothedIncompatibleFunctionPrefix, fncName) +} + +func getAnchoredIncompatibleFunctionError(fncName string) error { + return fmt.Errorf("%s%s", anchoredIncompatibleFunctionPrefix, fncName) +} + +func sortImplode(m map[string]struct{}) string { + tmp := slices.Sorted(maps.Keys(m)) + return strings.Join(tmp, ", ") +} diff --git a/pkg/streamingpromql/planning/core/core.pb.go b/pkg/streamingpromql/planning/core/core.pb.go index 69306273763..cd4cdced58d 100644 --- a/pkg/streamingpromql/planning/core/core.pb.go +++ b/pkg/streamingpromql/planning/core/core.pb.go @@ -740,6 +740,7 @@ type VectorSelectorDetails struct { ExpressionPosition PositionRange `protobuf:"bytes,4,opt,name=expressionPosition,proto3" json:"expressionPosition"` ReturnSampleTimestamps bool `protobuf:"varint,5,opt,name=returnSampleTimestamps,proto3" json:"returnSampleTimestamps,omitempty"` SkipHistogramBuckets bool `protobuf:"varint,6,opt,name=skipHistogramBuckets,proto3" json:"skipHistogramBuckets,omitempty"` + Smoothed bool `protobuf:"varint,7,opt,name=smoothed,proto3" 
json:"smoothed,omitempty"` } func (m *VectorSelectorDetails) Reset() { *m = VectorSelectorDetails{} } @@ -816,6 +817,13 @@ func (m *VectorSelectorDetails) GetSkipHistogramBuckets() bool { return false } +func (m *VectorSelectorDetails) GetSmoothed() bool { + if m != nil { + return m.Smoothed + } + return false +} + func (*VectorSelectorDetails) XXX_MessageName() string { return "core.VectorSelectorDetails" } @@ -827,6 +835,8 @@ type MatrixSelectorDetails struct { Range time.Duration `protobuf:"bytes,4,opt,name=range,proto3,stdduration" json:"range"` ExpressionPosition PositionRange `protobuf:"bytes,5,opt,name=expressionPosition,proto3" json:"expressionPosition"` SkipHistogramBuckets bool `protobuf:"varint,6,opt,name=skipHistogramBuckets,proto3" json:"skipHistogramBuckets,omitempty"` + Smoothed bool `protobuf:"varint,7,opt,name=smoothed,proto3" json:"smoothed,omitempty"` + Anchored bool `protobuf:"varint,8,opt,name=anchored,proto3" json:"anchored,omitempty"` } func (m *MatrixSelectorDetails) Reset() { *m = MatrixSelectorDetails{} } @@ -903,6 +913,20 @@ func (m *MatrixSelectorDetails) GetSkipHistogramBuckets() bool { return false } +func (m *MatrixSelectorDetails) GetSmoothed() bool { + if m != nil { + return m.Smoothed + } + return false +} + +func (m *MatrixSelectorDetails) GetAnchored() bool { + if m != nil { + return m.Anchored + } + return false +} + func (*MatrixSelectorDetails) XXX_MessageName() string { return "core.MatrixSelectorDetails" } @@ -1190,94 +1214,96 @@ func init() { func init() { proto.RegisterFile("core.proto", fileDescriptor_f7e43720d1edc0fe) } var fileDescriptor_f7e43720d1edc0fe = []byte{ - // 1386 bytes of a gzipped FileDescriptorProto + // 1414 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xbd, 0x6f, 0x1b, 0x47, - 0x16, 0xe7, 0x2e, 0x49, 0x99, 0x7a, 0x92, 0xa8, 0xf1, 0x88, 0xb2, 0x79, 0xba, 0xc3, 0x4a, 0x20, + 0x16, 0xe7, 0x2e, 0x49, 0x89, 0x7a, 0x92, 0xa8, 0xf1, 0x88, 0xb2, 
0x79, 0xba, 0xc3, 0x4a, 0x20, 0xee, 0x0e, 0x82, 0x0a, 0xf2, 0xac, 0x03, 0xce, 0x67, 0xf8, 0x70, 0x87, 0xa5, 0xc8, 0x93, 0x19, 0xf3, 0x43, 0x5a, 0x92, 0x72, 0x52, 0x04, 0xc6, 0x90, 0x1c, 0xad, 0x16, 0xde, 0xdd, 0x59, 0xcf, - 0xce, 0xca, 0x56, 0x97, 0x26, 0x45, 0x3a, 0x97, 0x29, 0xf2, 0x07, 0xa4, 0xc9, 0x1f, 0x90, 0x32, - 0x9d, 0x52, 0x04, 0x70, 0x69, 0xa4, 0x50, 0x62, 0xa9, 0x49, 0xe9, 0xda, 0x01, 0x82, 0x60, 0x3f, - 0x48, 0x2d, 0x29, 0xc9, 0xb1, 0x65, 0xa5, 0x48, 0xb5, 0xf3, 0xde, 0xbc, 0xdf, 0xfb, 0x9e, 0xf7, - 0x16, 0xa0, 0xcf, 0x38, 0x2d, 0x3a, 0x9c, 0x09, 0x86, 0x53, 0xfe, 0x79, 0xe9, 0x1f, 0xba, 0x21, - 0xf6, 0xbc, 0x5e, 0xb1, 0xcf, 0xac, 0x92, 0xce, 0xc9, 0x2e, 0xb1, 0x49, 0xc9, 0x32, 0x2c, 0x83, - 0x97, 0x9c, 0x47, 0x7a, 0x78, 0x72, 0x7a, 0xe1, 0x37, 0xc4, 0x2d, 0x6d, 0xbd, 0x11, 0xe1, 0x0a, - 0x4e, 0x89, 0x65, 0xd8, 0xba, 0xc3, 0x99, 0xf5, 0xd8, 0x2c, 0x31, 0x87, 0x72, 0x22, 0x18, 0x77, - 0x4b, 0xbb, 0x9e, 0xdd, 0x17, 0x06, 0xb3, 0x63, 0xa7, 0x48, 0x63, 0x4e, 0x67, 0x3a, 0x0b, 0x8e, - 0x25, 0xff, 0x14, 0x71, 0x15, 0x9d, 0x31, 0xdd, 0xa4, 0xa5, 0x80, 0xea, 0x79, 0xbb, 0xa5, 0x81, - 0xc7, 0x89, 0x0f, 0x8b, 0xee, 0x97, 0x27, 0xef, 0x85, 0x61, 0x51, 0x57, 0x10, 0xcb, 0x09, 0x05, - 0x0a, 0x5f, 0x4b, 0x30, 0xb7, 0xc5, 0x5c, 0xc3, 0xc7, 0x68, 0xc4, 0xd6, 0x29, 0xee, 0x42, 0xda, - 0x15, 0x84, 0x8b, 0xbc, 0xb4, 0x22, 0xad, 0x26, 0xcb, 0xff, 0x7b, 0x7d, 0xb4, 0x7c, 0x37, 0x16, - 0x8d, 0xef, 0x32, 0x15, 0x7b, 0xd4, 0x73, 0x27, 0x8f, 0x8f, 0xcd, 0x92, 0x43, 0xb8, 0x4b, 0x79, - 0xc9, 0x61, 0x2e, 0xf7, 0x75, 0x15, 0xb7, 0x98, 0xab, 0x85, 0xda, 0xf0, 0x36, 0x24, 0xa9, 0x3d, - 0xc8, 0xcb, 0x57, 0xa3, 0xd4, 0xd7, 0x55, 0xb8, 0x05, 0x8b, 0x65, 0xc3, 0x26, 0xfc, 0xa0, 0xfa, - 0xd4, 0xe1, 0xd4, 0x75, 0x0d, 0x66, 0xdf, 0x33, 0x6c, 0xe1, 0xe2, 0x3c, 0x5c, 0x33, 0xec, 0xbe, - 0xe9, 0x0d, 0x68, 0x5e, 0x5a, 0x49, 0xae, 0x4e, 0x6b, 0x43, 0xb2, 0xf0, 0xad, 0x04, 0x4b, 0xaa, - 0xae, 0x73, 0xaa, 0x13, 0x41, 0x4f, 0x61, 0x15, 0x2a, 0x88, 0x61, 0xba, 0x78, 0x0d, 0x64, 0xe6, - 0x04, 0x81, 
0x67, 0xd7, 0x97, 0x8a, 0x41, 0x1f, 0x0c, 0xa5, 0x0d, 0x66, 0xb7, 0x82, 0x32, 0xf9, - 0x89, 0x92, 0x99, 0x83, 0x97, 0x20, 0xa3, 0x73, 0xe6, 0x39, 0x86, 0xad, 0xe7, 0xe5, 0xc0, 0xca, - 0x88, 0xf6, 0x1d, 0x78, 0x62, 0x88, 0x3d, 0xe6, 0x89, 0x7c, 0x72, 0x45, 0x5a, 0xcd, 0x68, 0x43, - 0x12, 0xd7, 0x00, 0xd3, 0x91, 0xd9, 0x61, 0xe2, 0xf3, 0xa9, 0x15, 0x69, 0x75, 0x66, 0x7d, 0x21, - 0xb4, 0x38, 0x56, 0x8e, 0x72, 0xea, 0xf0, 0x68, 0x39, 0xa1, 0x9d, 0x03, 0x2a, 0x7c, 0x21, 0xc3, - 0xcd, 0xc9, 0xf8, 0x87, 0x81, 0xfc, 0x2d, 0x16, 0xc8, 0x62, 0xa8, 0x36, 0x14, 0x1d, 0x8f, 0xe1, - 0x3f, 0x90, 0xdd, 0xa7, 0x7d, 0xc1, 0x78, 0x83, 0x88, 0xfe, 0x5e, 0x18, 0x89, 0xef, 0x49, 0x2e, - 0x84, 0xec, 0x8c, 0xdd, 0x69, 0x13, 0xb2, 0x58, 0x01, 0xe0, 0x54, 0x78, 0xdc, 0x2e, 0x33, 0x66, - 0x46, 0x81, 0xc6, 0x38, 0x57, 0x18, 0x2b, 0xbe, 0x05, 0xe9, 0x3d, 0xbf, 0xb4, 0xf9, 0x74, 0x80, - 0xfe, 0x73, 0x3c, 0xa4, 0x89, 0xea, 0x6b, 0xa1, 0x64, 0xe1, 0x1b, 0x09, 0xb2, 0xe3, 0x01, 0xe0, - 0x8f, 0x21, 0xd5, 0x27, 0x7c, 0x10, 0x75, 0x76, 0xed, 0xf5, 0xd1, 0x72, 0xf5, 0xdd, 0x9a, 0x30, - 0x9e, 0x91, 0x0d, 0xc2, 0x07, 0x86, 0x4d, 0x4c, 0x43, 0x1c, 0x68, 0x81, 0x5a, 0xfc, 0x77, 0xc8, - 0x5a, 0x91, 0xa9, 0x3a, 0xe9, 0x51, 0xd3, 0x8d, 0xfa, 0x62, 0x82, 0x8b, 0xb3, 0x20, 0x33, 0x3b, - 0xca, 0x97, 0xcc, 0xec, 0x78, 0xbb, 0xa6, 0xc6, 0xdb, 0xf5, 0x53, 0x19, 0x16, 0xfe, 0x1f, 0x0d, - 0x82, 0x0d, 0x62, 0x9a, 0xc3, 0xf2, 0x96, 0x20, 0x33, 0x9c, 0x0f, 0x51, 0x91, 0x17, 0x8a, 0xa7, - 0x03, 0x63, 0x88, 0xd0, 0x46, 0x42, 0x98, 0xc3, 0x2c, 0xe9, 0xb9, 0xd4, 0x16, 0x31, 0xc7, 0xa2, - 0x22, 0x08, 0xfa, 0xd4, 0xe9, 0x15, 0x03, 0xfe, 0x16, 0x31, 0x78, 0xf9, 0x8e, 0x5f, 0x84, 0xef, - 0x8f, 0x96, 0x6f, 0xbd, 0xcd, 0xd0, 0x0b, 0x71, 0xea, 0x80, 0x38, 0x82, 0x72, 0x6d, 0xcc, 0xc6, - 0x05, 0xe5, 0x4f, 0x5e, 0xa6, 0xd5, 0x9f, 0x40, 0xae, 0xe9, 0x59, 0x3d, 0xca, 0xeb, 0x86, 0xa0, - 0x9c, 0x8c, 0xf2, 0x90, 0x83, 0xf4, 0x3e, 0x31, 0x3d, 0x1a, 0x24, 0x41, 0xd2, 0x42, 0xe2, 0x02, - 0xc3, 0xf2, 0x25, 0x0d, 0xb7, 0x05, 0xf7, 0x4b, 
0xf7, 0x06, 0xc3, 0xd3, 0xbf, 0x83, 0xe1, 0xcf, - 0x24, 0xb8, 0xd1, 0x3d, 0xff, 0x6d, 0xff, 0x35, 0xf6, 0xb6, 0xa3, 0x87, 0xda, 0x3d, 0xfb, 0xb4, - 0xaf, 0xd0, 0x97, 0x9f, 0x65, 0x58, 0x0c, 0x1b, 0xbf, 0x4d, 0xcd, 0xe0, 0x3b, 0x74, 0xa5, 0x08, - 0x99, 0xa0, 0xb7, 0x29, 0x77, 0x83, 0x49, 0x3b, 0xb3, 0x8e, 0x43, 0xd5, 0x41, 0x0b, 0x34, 0xc2, - 0x2b, 0x6d, 0x24, 0x83, 0xff, 0x0b, 0xd3, 0xa3, 0x05, 0x14, 0xf9, 0xb2, 0x54, 0x0c, 0x57, 0x54, - 0x71, 0xb8, 0xa2, 0x8a, 0x9d, 0xa1, 0x44, 0x39, 0xf5, 0xec, 0x87, 0x65, 0x49, 0x3b, 0x85, 0xe0, - 0xbb, 0x30, 0xc5, 0x76, 0x77, 0x5d, 0x2a, 0xa2, 0x36, 0xfa, 0xd3, 0x19, 0x70, 0x25, 0xda, 0x7f, - 0xe5, 0x8c, 0x1f, 0xce, 0xe7, 0x3e, 0x3e, 0x82, 0x5c, 0xe5, 0x38, 0xfa, 0x17, 0xdc, 0x08, 0xe7, - 0x5c, 0x9b, 0x58, 0x8e, 0x49, 0x47, 0x1e, 0x87, 0xf3, 0x29, 0xa3, 0x5d, 0x70, 0x8b, 0xd7, 0x21, - 0xe7, 0x3e, 0x32, 0x9c, 0x7b, 0x86, 0x2b, 0x98, 0xce, 0x89, 0x55, 0xf6, 0xfa, 0x8f, 0xa8, 0x70, - 0xf3, 0x53, 0x01, 0xea, 0xdc, 0xbb, 0xc2, 0x2f, 0x32, 0x2c, 0x36, 0x88, 0xe0, 0xc6, 0xd3, 0x3f, - 0x74, 0xf6, 0xef, 0x40, 0x3a, 0x58, 0xdf, 0x51, 0xc2, 0xdf, 0x0a, 0x1b, 0x22, 0x2e, 0x28, 0x5c, - 0xfa, 0x32, 0x85, 0xbb, 0x4c, 0x01, 0xbe, 0x93, 0x61, 0xbe, 0xed, 0xf5, 0x1e, 0x7b, 0x94, 0x1f, - 0x0c, 0x53, 0x3f, 0x96, 0x4a, 0xe9, 0x7d, 0x52, 0x29, 0xbf, 0x47, 0x2a, 0x93, 0xef, 0x9c, 0xca, - 0xdb, 0x90, 0x72, 0x05, 0x75, 0xde, 0xa5, 0x08, 0x01, 0xe0, 0x0a, 0x6b, 0x50, 0x58, 0x01, 0xa5, - 0x2d, 0xa8, 0x53, 0xb3, 0xf7, 0x09, 0x37, 0x88, 0x2d, 0xce, 0x4c, 0x38, 0x7f, 0xf8, 0xcd, 0xc6, - 0x7b, 0x18, 0xb7, 0x20, 0x25, 0x0e, 0x1c, 0x1a, 0x2d, 0xee, 0xbb, 0xaf, 0x8f, 0x96, 0x6f, 0xff, - 0xe6, 0xe2, 0xb6, 0xd8, 0x80, 0x9a, 0x25, 0x33, 0xd8, 0x45, 0xc5, 0x40, 0x51, 0xe7, 0xc0, 0xa1, - 0x5a, 0xa0, 0x08, 0x63, 0x48, 0xd9, 0xc4, 0xa2, 0x41, 0xf6, 0xa7, 0xb5, 0xe0, 0x7c, 0x3a, 0xd3, - 0x93, 0xb1, 0x99, 0x5e, 0xb8, 0x03, 0x4b, 0x15, 0x3a, 0xf0, 0x1c, 0xd3, 0xe8, 0x13, 0x41, 0x55, - 0x7b, 0xd0, 0xa0, 0x5c, 0xa7, 0x91, 0xa7, 0x1f, 0xa4, 0x32, 0x12, 0x92, 0xb5, 0x45, 
0xee, 0xd9, - 0x15, 0x6a, 0x92, 0x03, 0x3a, 0x68, 0x12, 0x8b, 0x6a, 0xd4, 0x62, 0xfb, 0xc4, 0x2c, 0x5c, 0x87, - 0xf9, 0x0a, 0x67, 0x8e, 0xcf, 0x8a, 0xe4, 0xd7, 0x0e, 0x65, 0xc8, 0x9d, 0xf7, 0x47, 0x89, 0x6f, - 0xc2, 0x82, 0xba, 0xb9, 0xa9, 0x55, 0x37, 0xd5, 0x4e, 0xad, 0xd5, 0x7c, 0xd8, 0x6d, 0xde, 0x6f, - 0xb6, 0x1e, 0x34, 0x51, 0x02, 0x2f, 0xc0, 0x7c, 0xfc, 0xa2, 0xdd, 0x6d, 0x20, 0x69, 0x92, 0xa9, - 0xee, 0x6c, 0x22, 0x19, 0x2f, 0xc2, 0xf5, 0x38, 0x73, 0xa3, 0xd5, 0x6d, 0x76, 0x50, 0x72, 0x52, - 0xb6, 0x51, 0x6b, 0xa2, 0xd4, 0x19, 0xa6, 0xfa, 0x21, 0x4a, 0x4f, 0x2a, 0xd8, 0xd4, 0x5a, 0xdd, - 0x2d, 0x34, 0x85, 0x6f, 0x00, 0x1e, 0xf3, 0xa0, 0x53, 0xa9, 0x54, 0x77, 0xd0, 0xb5, 0x73, 0xf8, - 0x3b, 0xaa, 0x86, 0x32, 0x38, 0x07, 0x28, 0xce, 0xef, 0xb4, 0xb6, 0xee, 0xa3, 0xe9, 0xc9, 0x00, - 0xcb, 0xad, 0x4e, 0xa7, 0xd5, 0xb8, 0x8f, 0x00, 0xff, 0x05, 0xf2, 0x67, 0xdc, 0x7e, 0xb8, 0xa3, - 0xd6, 0xbb, 0xd5, 0x36, 0x9a, 0xc1, 0x79, 0xc8, 0xc5, 0x6f, 0xb7, 0xbb, 0x6a, 0xb3, 0x53, 0xab, - 0x57, 0xd1, 0xec, 0xda, 0x57, 0x32, 0xcc, 0x4f, 0xfc, 0xd3, 0x62, 0x0c, 0xd9, 0x72, 0xad, 0xa9, - 0x6a, 0x1f, 0xc5, 0x12, 0x38, 0x0f, 0x33, 0x11, 0xaf, 0xae, 0x36, 0x2b, 0x48, 0xc2, 0x59, 0x80, - 0x21, 0xa3, 0xa5, 0x21, 0x39, 0x06, 0xaa, 0x77, 0x9b, 0xf5, 0x6a, 0xbb, 0x8d, 0x92, 0x18, 0xc1, - 0x6c, 0xc4, 0x53, 0x3b, 0x6a, 0x73, 0x1d, 0xa5, 0x62, 0xa8, 0x76, 0xb7, 0x8c, 0xd2, 0x31, 0x5a, - 0xad, 0x54, 0xd0, 0x54, 0x8c, 0x6e, 0x74, 0xeb, 0xe8, 0x5a, 0x9c, 0x6e, 0x55, 0x50, 0x26, 0x46, - 0x57, 0x6a, 0x3b, 0x68, 0x3a, 0x46, 0x6f, 0xb5, 0x1e, 0x20, 0x88, 0xb9, 0x59, 0xdd, 0xae, 0x6f, - 0xa0, 0x99, 0x98, 0x40, 0xb3, 0xba, 0x8d, 0x66, 0xe3, 0x6e, 0x77, 0xaa, 0x68, 0x2e, 0x4e, 0xb7, - 0xdb, 0x28, 0x1b, 0xa3, 0x37, 0x3b, 0x55, 0x34, 0x3f, 0x46, 0x6b, 0x08, 0xad, 0xad, 0x43, 0x76, - 0xfc, 0x37, 0x01, 0x5f, 0x87, 0xb9, 0xee, 0x44, 0xb2, 0xe6, 0x60, 0xba, 0x3b, 0x0a, 0x52, 0x2a, - 0xff, 0xfb, 0xf9, 0x4b, 0x25, 0xf1, 0xe2, 0xa5, 0x92, 0x78, 0xf5, 0x52, 0x91, 0x3e, 0x39, 0x56, - 0xa4, 0x2f, 0x8f, 0x95, 
0xc4, 0xe1, 0xb1, 0x22, 0x3d, 0x3f, 0x56, 0xa4, 0x1f, 0x8f, 0x15, 0xe9, - 0xa7, 0x63, 0x25, 0xf1, 0xea, 0x58, 0x91, 0x9e, 0x9d, 0x28, 0x89, 0xc3, 0x13, 0x45, 0x7a, 0x7e, - 0xa2, 0x24, 0x5e, 0x9c, 0x28, 0x89, 0xde, 0x54, 0x30, 0x52, 0xfe, 0xf9, 0x6b, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x87, 0xfa, 0xb5, 0x97, 0x4d, 0x0f, 0x00, 0x00, + 0xce, 0xca, 0x56, 0x97, 0x26, 0x45, 0x3a, 0x77, 0x49, 0x91, 0x3f, 0x20, 0x4d, 0xda, 0x00, 0x29, + 0xd3, 0x29, 0x45, 0x00, 0x97, 0x46, 0x0a, 0x25, 0x96, 0x9a, 0x94, 0xae, 0x5d, 0x05, 0xfb, 0x41, + 0x6a, 0x49, 0x49, 0x8e, 0x3f, 0x94, 0x22, 0xd5, 0xce, 0xfb, 0xf8, 0xbd, 0x79, 0x6f, 0x7e, 0x6f, + 0xdf, 0x0c, 0x40, 0x9f, 0x71, 0x5a, 0x74, 0x38, 0x13, 0x0c, 0xa7, 0xfc, 0xf5, 0xf2, 0x3f, 0x74, + 0x43, 0xec, 0x7b, 0xbd, 0x62, 0x9f, 0x59, 0x25, 0x9d, 0x93, 0x3d, 0x62, 0x93, 0x92, 0x65, 0x58, + 0x06, 0x2f, 0x39, 0x0f, 0xf5, 0x70, 0xe5, 0xf4, 0xc2, 0x6f, 0x88, 0x5b, 0xde, 0x7e, 0x2d, 0xc2, + 0x15, 0x9c, 0x12, 0xcb, 0xb0, 0x75, 0x87, 0x33, 0xeb, 0x91, 0x59, 0x62, 0x0e, 0xe5, 0x44, 0x30, + 0xee, 0x96, 0xf6, 0x3c, 0xbb, 0x2f, 0x0c, 0x66, 0xc7, 0x56, 0x51, 0xc4, 0x9c, 0xce, 0x74, 0x16, + 0x2c, 0x4b, 0xfe, 0x2a, 0xd2, 0x2a, 0x3a, 0x63, 0xba, 0x49, 0x4b, 0x81, 0xd4, 0xf3, 0xf6, 0x4a, + 0x03, 0x8f, 0x13, 0x1f, 0x16, 0xd9, 0x57, 0x26, 0xed, 0xc2, 0xb0, 0xa8, 0x2b, 0x88, 0xe5, 0x84, + 0x0e, 0x85, 0x6f, 0x25, 0x98, 0xdf, 0x66, 0xae, 0xe1, 0x63, 0x34, 0x62, 0xeb, 0x14, 0x77, 0x21, + 0xed, 0x0a, 0xc2, 0x45, 0x5e, 0x5a, 0x95, 0xd6, 0x92, 0xe5, 0xff, 0xbd, 0x3a, 0x5e, 0xb9, 0x13, + 0xab, 0xc6, 0x4f, 0x99, 0x8a, 0x7d, 0xea, 0xb9, 0x93, 0xcb, 0x47, 0x66, 0xc9, 0x21, 0xdc, 0xa5, + 0xbc, 0xe4, 0x30, 0x97, 0xfb, 0xb1, 0x8a, 0xdb, 0xcc, 0xd5, 0xc2, 0x68, 0x78, 0x07, 0x92, 0xd4, + 0x1e, 0xe4, 0xe5, 0xab, 0x09, 0xea, 0xc7, 0x2a, 0xdc, 0x84, 0xa5, 0xb2, 0x61, 0x13, 0x7e, 0x58, + 0x7d, 0xe2, 0x70, 0xea, 0xba, 0x06, 0xb3, 0xef, 0x1a, 0xb6, 0x70, 0x71, 0x1e, 0xa6, 0x0d, 0xbb, + 0x6f, 0x7a, 0x03, 0x9a, 0x97, 0x56, 0x93, 0x6b, 0x33, 0xda, 0x50, 0x2c, 0x7c, 0x2f, 0xc1, 0xb2, + 
0xaa, 0xeb, 0x9c, 0xea, 0x44, 0xd0, 0x33, 0x58, 0x85, 0x0a, 0x62, 0x98, 0x2e, 0x5e, 0x07, 0x99, + 0x39, 0x41, 0xe1, 0xd9, 0x8d, 0xe5, 0x62, 0xd0, 0x07, 0x43, 0x6f, 0x83, 0xd9, 0xad, 0x80, 0x26, + 0xff, 0xa0, 0x64, 0xe6, 0xe0, 0x65, 0xc8, 0xe8, 0x9c, 0x79, 0x8e, 0x61, 0xeb, 0x79, 0x39, 0xd8, + 0x65, 0x24, 0xfb, 0x09, 0x3c, 0x36, 0xc4, 0x3e, 0xf3, 0x44, 0x3e, 0xb9, 0x2a, 0xad, 0x65, 0xb4, + 0xa1, 0x88, 0x6b, 0x80, 0xe9, 0x68, 0xdb, 0xe1, 0xc1, 0xe7, 0x53, 0xab, 0xd2, 0xda, 0xec, 0xc6, + 0x62, 0xb8, 0xe3, 0x18, 0x1d, 0xe5, 0xd4, 0xd1, 0xf1, 0x4a, 0x42, 0xbb, 0x00, 0x54, 0xf8, 0x52, + 0x86, 0x1b, 0x93, 0xf5, 0x0f, 0x0b, 0xf9, 0x5b, 0xac, 0x90, 0xa5, 0x30, 0x6c, 0xe8, 0x3a, 0x5e, + 0xc3, 0x7f, 0x20, 0x7b, 0x40, 0xfb, 0x82, 0xf1, 0x06, 0x11, 0xfd, 0xfd, 0xb0, 0x12, 0x3f, 0x93, + 0x5c, 0x08, 0xd9, 0x1d, 0xb3, 0x69, 0x13, 0xbe, 0x58, 0x01, 0xe0, 0x54, 0x78, 0xdc, 0x2e, 0x33, + 0x66, 0x46, 0x85, 0xc6, 0x34, 0x57, 0x58, 0x2b, 0xbe, 0x09, 0xe9, 0x7d, 0x9f, 0xda, 0x7c, 0x3a, + 0x40, 0xff, 0x39, 0x5e, 0xd2, 0x04, 0xfb, 0x5a, 0xe8, 0x59, 0xf8, 0x4e, 0x82, 0xec, 0x78, 0x01, + 0xf8, 0x63, 0x48, 0xf5, 0x09, 0x1f, 0x44, 0x9d, 0x5d, 0x7b, 0x75, 0xbc, 0x52, 0x7d, 0xbb, 0x26, + 0x8c, 0x9f, 0xc8, 0x26, 0xe1, 0x03, 0xc3, 0x26, 0xa6, 0x21, 0x0e, 0xb5, 0x20, 0x2c, 0xfe, 0x3b, + 0x64, 0xad, 0x68, 0xab, 0x3a, 0xe9, 0x51, 0xd3, 0x8d, 0xfa, 0x62, 0x42, 0x8b, 0xb3, 0x20, 0x33, + 0x3b, 0x3a, 0x2f, 0x99, 0xd9, 0xf1, 0x76, 0x4d, 0x8d, 0xb7, 0xeb, 0xa7, 0x32, 0x2c, 0xfe, 0x3f, + 0x1a, 0x04, 0x9b, 0xc4, 0x34, 0x87, 0xf4, 0x96, 0x20, 0x33, 0x9c, 0x0f, 0x11, 0xc9, 0x8b, 0xc5, + 0xb3, 0x81, 0x31, 0x44, 0x68, 0x23, 0x27, 0xcc, 0x61, 0x8e, 0xf4, 0x5c, 0x6a, 0x8b, 0x58, 0x62, + 0x11, 0x09, 0x82, 0x3e, 0x71, 0x7a, 0xc5, 0x40, 0xbf, 0x4d, 0x0c, 0x5e, 0xbe, 0xed, 0x93, 0xf0, + 0xe3, 0xf1, 0xca, 0xcd, 0x37, 0x19, 0x7a, 0x21, 0x4e, 0x1d, 0x10, 0x47, 0x50, 0xae, 0x8d, 0xed, + 0x71, 0x09, 0xfd, 0xc9, 0x77, 0x69, 0xf5, 0xc7, 0x90, 0x6b, 0x7a, 0x56, 0x8f, 0xf2, 0xba, 0x21, + 0x28, 0x27, 0xa3, 0x73, 0xc8, 0x41, 
0xfa, 0x80, 0x98, 0x1e, 0x0d, 0x0e, 0x41, 0xd2, 0x42, 0xe1, + 0x92, 0x8d, 0xe5, 0x77, 0xdc, 0xb8, 0x2d, 0xb8, 0x4f, 0xdd, 0x6b, 0x36, 0x9e, 0xf9, 0x1d, 0x36, + 0xfe, 0x4c, 0x82, 0xeb, 0xdd, 0x8b, 0xff, 0xed, 0xbf, 0xc6, 0xfe, 0xed, 0xe8, 0x47, 0xed, 0x9e, + 0xff, 0xb5, 0xaf, 0x30, 0x97, 0xcf, 0x93, 0xb0, 0x14, 0x36, 0x7e, 0x9b, 0x9a, 0xc1, 0x77, 0x98, + 0x4a, 0x11, 0x32, 0x41, 0x6f, 0x53, 0xee, 0x06, 0x93, 0x76, 0x76, 0x03, 0x87, 0xa1, 0x83, 0x16, + 0x68, 0x84, 0x26, 0x6d, 0xe4, 0x83, 0xff, 0x0b, 0x33, 0xa3, 0x0b, 0x28, 0xca, 0x65, 0xb9, 0x18, + 0x5e, 0x51, 0xc5, 0xe1, 0x15, 0x55, 0xec, 0x0c, 0x3d, 0xca, 0xa9, 0xa7, 0x3f, 0xad, 0x48, 0xda, + 0x19, 0x04, 0xdf, 0x81, 0x29, 0xb6, 0xb7, 0xe7, 0x52, 0x11, 0xb5, 0xd1, 0x9f, 0xce, 0x81, 0x2b, + 0xd1, 0xfd, 0x57, 0xce, 0xf8, 0xe5, 0x7c, 0xe1, 0xe3, 0x23, 0xc8, 0x55, 0x8e, 0xa3, 0x7f, 0xc1, + 0xf5, 0x70, 0xce, 0xb5, 0x89, 0xe5, 0x98, 0x74, 0x94, 0x71, 0x38, 0x9f, 0x32, 0xda, 0x25, 0x56, + 0xbc, 0x01, 0x39, 0xf7, 0xa1, 0xe1, 0xdc, 0x35, 0x5c, 0xc1, 0x74, 0x4e, 0xac, 0xb2, 0xd7, 0x7f, + 0x48, 0x85, 0x9b, 0x9f, 0x0a, 0x50, 0x17, 0xda, 0xfc, 0x7b, 0xc6, 0xb5, 0x18, 0x13, 0xfb, 0x74, + 0x90, 0x9f, 0x0e, 0xfc, 0x46, 0x72, 0xe1, 0x9b, 0x24, 0x2c, 0x35, 0x88, 0xe0, 0xc6, 0x93, 0x3f, + 0x34, 0x33, 0xb7, 0x21, 0x1d, 0x5c, 0xed, 0x11, 0x19, 0x6f, 0x84, 0x0d, 0x11, 0x97, 0x90, 0x9a, + 0x7e, 0x17, 0x52, 0xaf, 0x98, 0x1c, 0xdf, 0x46, 0xec, 0xfe, 0x3e, 0xe3, 0x74, 0x90, 0xcf, 0x84, + 0xb6, 0xa1, 0x5c, 0xf8, 0x41, 0x86, 0x85, 0xb6, 0xd7, 0x7b, 0xe4, 0x51, 0x7e, 0x38, 0xa4, 0x6c, + 0x8c, 0x02, 0xe9, 0x7d, 0x28, 0x90, 0xdf, 0x83, 0x82, 0xe4, 0x5b, 0x53, 0x70, 0x0b, 0x52, 0xae, + 0xa0, 0xce, 0xdb, 0x90, 0x17, 0x00, 0xae, 0x90, 0xbb, 0xc2, 0x2a, 0x28, 0x6d, 0x41, 0x9d, 0x9a, + 0x7d, 0x40, 0xb8, 0x41, 0x6c, 0x71, 0x6e, 0x6a, 0xfa, 0x03, 0x75, 0x2e, 0xde, 0xfb, 0xb8, 0x05, + 0x29, 0x71, 0xe8, 0xd0, 0xe8, 0x31, 0x70, 0xe7, 0xd5, 0xf1, 0xca, 0xad, 0xdf, 0x7c, 0x0c, 0x58, + 0x6c, 0x40, 0xcd, 0x92, 0x19, 0xdc, 0x6f, 0xc5, 0x20, 0x50, 0xe7, 0xd0, 
0xa1, 0x5a, 0x10, 0x08, + 0x63, 0x48, 0xd9, 0xc4, 0xa2, 0xc1, 0xe9, 0xcf, 0x68, 0xc1, 0xfa, 0xec, 0x9e, 0x48, 0xc6, 0xee, + 0x89, 0xc2, 0x6d, 0x58, 0xae, 0xd0, 0x81, 0xe7, 0x98, 0x46, 0x9f, 0x08, 0xaa, 0xda, 0x83, 0x06, + 0xe5, 0x3a, 0x8d, 0x32, 0xfd, 0x20, 0x95, 0x91, 0x90, 0xac, 0x2d, 0x71, 0xcf, 0xae, 0x50, 0x93, + 0x1c, 0xd2, 0x41, 0x93, 0x58, 0x54, 0xa3, 0x16, 0x3b, 0x20, 0x66, 0xe1, 0x1a, 0x2c, 0x54, 0x38, + 0x73, 0x7c, 0x55, 0xe4, 0xbf, 0x7e, 0x24, 0x43, 0xee, 0xa2, 0x57, 0x2a, 0xbe, 0x01, 0x8b, 0xea, + 0xd6, 0x96, 0x56, 0xdd, 0x52, 0x3b, 0xb5, 0x56, 0xf3, 0x41, 0xb7, 0x79, 0xaf, 0xd9, 0xba, 0xdf, + 0x44, 0x09, 0xbc, 0x08, 0x0b, 0x71, 0x43, 0xbb, 0xdb, 0x40, 0xd2, 0xa4, 0x52, 0xdd, 0xdd, 0x42, + 0x32, 0x5e, 0x82, 0x6b, 0x71, 0xe5, 0x66, 0xab, 0xdb, 0xec, 0xa0, 0xe4, 0xa4, 0x6f, 0xa3, 0xd6, + 0x44, 0xa9, 0x73, 0x4a, 0xf5, 0x43, 0x94, 0x9e, 0x0c, 0xb0, 0xa5, 0xb5, 0xba, 0xdb, 0x68, 0x0a, + 0x5f, 0x07, 0x3c, 0x96, 0x41, 0xa7, 0x52, 0xa9, 0xee, 0xa2, 0xe9, 0x0b, 0xf4, 0xbb, 0xaa, 0x86, + 0x32, 0x38, 0x07, 0x28, 0xae, 0xef, 0xb4, 0xb6, 0xef, 0xa1, 0x99, 0xc9, 0x02, 0xcb, 0xad, 0x4e, + 0xa7, 0xd5, 0xb8, 0x87, 0x00, 0xff, 0x05, 0xf2, 0xe7, 0xd2, 0x7e, 0xb0, 0xab, 0xd6, 0xbb, 0xd5, + 0x36, 0x9a, 0xc5, 0x79, 0xc8, 0xc5, 0xad, 0x3b, 0x5d, 0xb5, 0xd9, 0xa9, 0xd5, 0xab, 0x68, 0x6e, + 0xfd, 0x6b, 0x19, 0x16, 0x26, 0xde, 0xc9, 0x18, 0x43, 0xb6, 0x5c, 0x6b, 0xaa, 0xda, 0x47, 0xb1, + 0x03, 0x5c, 0x80, 0xd9, 0x48, 0x57, 0x57, 0x9b, 0x15, 0x24, 0xe1, 0x2c, 0xc0, 0x50, 0xd1, 0xd2, + 0x90, 0x1c, 0x03, 0xd5, 0xbb, 0xcd, 0x7a, 0xb5, 0xdd, 0x46, 0x49, 0x8c, 0x60, 0x2e, 0xd2, 0xa9, + 0x1d, 0xb5, 0xb9, 0x81, 0x52, 0x31, 0x54, 0xbb, 0x5b, 0x46, 0xe9, 0x98, 0xac, 0x56, 0x2a, 0x68, + 0x2a, 0x26, 0x37, 0xba, 0x75, 0x34, 0x1d, 0x97, 0x5b, 0x15, 0x94, 0x89, 0xc9, 0x95, 0xda, 0x2e, + 0x9a, 0x89, 0xc9, 0xdb, 0xad, 0xfb, 0x08, 0x62, 0x69, 0x56, 0x77, 0xea, 0x9b, 0x68, 0x36, 0xe6, + 0xd0, 0xac, 0xee, 0xa0, 0xb9, 0x78, 0xda, 0x9d, 0x2a, 0x9a, 0x8f, 0xcb, 0xed, 0x36, 0xca, 0xc6, + 0xe4, 0xad, 
0x4e, 0x15, 0x2d, 0x8c, 0xc9, 0x1a, 0x42, 0xeb, 0x1b, 0x90, 0x1d, 0x7f, 0x7a, 0xe0, + 0x6b, 0x30, 0xdf, 0x9d, 0x38, 0xac, 0x79, 0x98, 0xe9, 0x8e, 0x8a, 0x94, 0xca, 0xff, 0x7e, 0xf6, + 0x42, 0x49, 0x3c, 0x7f, 0xa1, 0x24, 0x5e, 0xbe, 0x50, 0xa4, 0x4f, 0x4e, 0x14, 0xe9, 0xab, 0x13, + 0x25, 0x71, 0x74, 0xa2, 0x48, 0xcf, 0x4e, 0x14, 0xe9, 0xe7, 0x13, 0x45, 0xfa, 0xe5, 0x44, 0x49, + 0xbc, 0x3c, 0x51, 0xa4, 0xa7, 0xa7, 0x4a, 0xe2, 0xe8, 0x54, 0x91, 0x9e, 0x9d, 0x2a, 0x89, 0xe7, + 0xa7, 0x4a, 0xa2, 0x37, 0x15, 0x8c, 0x94, 0x7f, 0xfe, 0x1a, 0x00, 0x00, 0xff, 0xff, 0x89, 0x0c, + 0x77, 0xa8, 0xa1, 0x0f, 0x00, 0x00, } func (x AggregationOperation) String() string { @@ -1415,7 +1441,7 @@ func (this *VectorSelectorDetails) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) + s := make([]string, 0, 11) s = append(s, "&core.VectorSelectorDetails{") if this.Matchers != nil { s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") @@ -1425,6 +1451,7 @@ func (this *VectorSelectorDetails) GoString() string { s = append(s, "ExpressionPosition: "+strings.Replace(this.ExpressionPosition.GoString(), `&`, ``, 1)+",\n") s = append(s, "ReturnSampleTimestamps: "+fmt.Sprintf("%#v", this.ReturnSampleTimestamps)+",\n") s = append(s, "SkipHistogramBuckets: "+fmt.Sprintf("%#v", this.SkipHistogramBuckets)+",\n") + s = append(s, "Smoothed: "+fmt.Sprintf("%#v", this.Smoothed)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1432,7 +1459,7 @@ func (this *MatrixSelectorDetails) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 10) + s := make([]string, 0, 12) s = append(s, "&core.MatrixSelectorDetails{") if this.Matchers != nil { s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") @@ -1442,6 +1469,8 @@ func (this *MatrixSelectorDetails) GoString() string { s = append(s, "Range: "+fmt.Sprintf("%#v", this.Range)+",\n") s = append(s, "ExpressionPosition: "+strings.Replace(this.ExpressionPosition.GoString(), `&`, 
``, 1)+",\n") s = append(s, "SkipHistogramBuckets: "+fmt.Sprintf("%#v", this.SkipHistogramBuckets)+",\n") + s = append(s, "Smoothed: "+fmt.Sprintf("%#v", this.Smoothed)+",\n") + s = append(s, "Anchored: "+fmt.Sprintf("%#v", this.Anchored)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1945,6 +1974,16 @@ func (m *VectorSelectorDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Smoothed { + i-- + if m.Smoothed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } if m.SkipHistogramBuckets { i-- if m.SkipHistogramBuckets { @@ -2030,6 +2069,26 @@ func (m *MatrixSelectorDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Anchored { + i-- + if m.Anchored { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Smoothed { + i-- + if m.Smoothed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } if m.SkipHistogramBuckets { i-- if m.SkipHistogramBuckets { @@ -2476,6 +2535,9 @@ func (m *VectorSelectorDetails) Size() (n int) { if m.SkipHistogramBuckets { n += 2 } + if m.Smoothed { + n += 2 + } return n } @@ -2504,6 +2566,12 @@ func (m *MatrixSelectorDetails) Size() (n int) { if m.SkipHistogramBuckets { n += 2 } + if m.Smoothed { + n += 2 + } + if m.Anchored { + n += 2 + } return n } @@ -2703,6 +2771,7 @@ func (this *VectorSelectorDetails) String() string { `ExpressionPosition:` + strings.Replace(strings.Replace(this.ExpressionPosition.String(), "PositionRange", "PositionRange", 1), `&`, ``, 1) + `,`, `ReturnSampleTimestamps:` + fmt.Sprintf("%v", this.ReturnSampleTimestamps) + `,`, `SkipHistogramBuckets:` + fmt.Sprintf("%v", this.SkipHistogramBuckets) + `,`, + `Smoothed:` + fmt.Sprintf("%v", this.Smoothed) + `,`, `}`, }, "") return s @@ -2723,6 +2792,8 @@ func (this *MatrixSelectorDetails) String() string { `Range:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Range), "Duration", "durationpb.Duration", 1), `&`, ``, 1) + `,`, 
`ExpressionPosition:` + strings.Replace(strings.Replace(this.ExpressionPosition.String(), "PositionRange", "PositionRange", 1), `&`, ``, 1) + `,`, `SkipHistogramBuckets:` + fmt.Sprintf("%v", this.SkipHistogramBuckets) + `,`, + `Smoothed:` + fmt.Sprintf("%v", this.Smoothed) + `,`, + `Anchored:` + fmt.Sprintf("%v", this.Anchored) + `,`, `}`, }, "") return s @@ -4111,6 +4182,26 @@ func (m *VectorSelectorDetails) Unmarshal(dAtA []byte) error { } } m.SkipHistogramBuckets = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Smoothed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Smoothed = bool(v != 0) default: iNdEx = preIndex skippy, err := skipCore(dAtA[iNdEx:]) @@ -4350,6 +4441,46 @@ func (m *MatrixSelectorDetails) Unmarshal(dAtA []byte) error { } } m.SkipHistogramBuckets = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Smoothed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Smoothed = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Anchored", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Anchored = bool(v != 0) default: iNdEx = preIndex skippy, err := skipCore(dAtA[iNdEx:]) diff --git a/pkg/streamingpromql/planning/core/core.proto b/pkg/streamingpromql/planning/core/core.proto 
index f1f70223fec..c5e5fc335c9 100644 --- a/pkg/streamingpromql/planning/core/core.proto +++ b/pkg/streamingpromql/planning/core/core.proto @@ -139,6 +139,7 @@ message VectorSelectorDetails { PositionRange expressionPosition = 4 [(gogoproto.nullable) = false]; bool returnSampleTimestamps = 5; bool skipHistogramBuckets = 6; + bool smoothed = 7; } message MatrixSelectorDetails { @@ -154,6 +155,8 @@ message MatrixSelectorDetails { ]; PositionRange expressionPosition = 5 [(gogoproto.nullable) = false]; bool skipHistogramBuckets = 6; + bool smoothed = 7; + bool anchored = 8; } message SubqueryDetails { diff --git a/pkg/streamingpromql/planning/core/matrix_selector.go b/pkg/streamingpromql/planning/core/matrix_selector.go index f925a9bc4af..1487dcac76a 100644 --- a/pkg/streamingpromql/planning/core/matrix_selector.go +++ b/pkg/streamingpromql/planning/core/matrix_selector.go @@ -64,7 +64,9 @@ func (m *MatrixSelector) EquivalentToIgnoringHintsAndChildren(other planning.Nod slices.EqualFunc(m.Matchers, otherMatrixSelector.Matchers, matchersEqual) && ((m.Timestamp == nil && otherMatrixSelector.Timestamp == nil) || (m.Timestamp != nil && otherMatrixSelector.Timestamp != nil && m.Timestamp.Equal(*otherMatrixSelector.Timestamp))) && m.Offset == otherMatrixSelector.Offset && - m.Range == otherMatrixSelector.Range + m.Range == otherMatrixSelector.Range && + m.Anchored == otherMatrixSelector.Anchored && + m.Smoothed == otherMatrixSelector.Smoothed } func (m *MatrixSelector) MergeHints(other planning.Node) error { @@ -93,9 +95,15 @@ func MaterializeMatrixSelector(m *MatrixSelector, _ *planning.Materializer, time SkipHistogramBuckets: m.SkipHistogramBuckets, ExpressionPosition: m.ExpressionPosition(), MemoryConsumptionTracker: params.MemoryConsumptionTracker, + Anchored: m.Anchored, + Smoothed: m.Smoothed, } - o := selectors.NewRangeVectorSelector(selector, params.MemoryConsumptionTracker) + if m.Anchored || m.Smoothed { + selector.LookbackDelta = params.LookbackDelta + } + + o := 
selectors.NewRangeVectorSelector(selector, params.MemoryConsumptionTracker, m.Anchored, m.Smoothed) return planning.NewSingleUseOperatorFactory(o), nil } @@ -104,10 +112,12 @@ func (m *MatrixSelector) ResultType() (parser.ValueType, error) { return parser.ValueTypeMatrix, nil } -func (m *MatrixSelector) QueriedTimeRange(queryTimeRange types.QueryTimeRange, _ time.Duration) planning.QueriedTimeRange { - // Matrix selectors do not use the lookback delta, so we don't pass it below. - minT, maxT := selectors.ComputeQueriedTimeRange(queryTimeRange, TimestampFromTime(m.Timestamp), m.Range, m.Offset.Milliseconds(), 0) - +func (m *MatrixSelector) QueriedTimeRange(queryTimeRange types.QueryTimeRange, lookback time.Duration) planning.QueriedTimeRange { + if !m.Anchored && !m.Smoothed { + // Normal matrix selectors do not use the lookback delta, so we don't pass it below. + lookback = 0 + } + minT, maxT := selectors.ComputeQueriedTimeRange(queryTimeRange, TimestampFromTime(m.Timestamp), m.Range, m.Offset.Milliseconds(), lookback, m.Anchored, m.Smoothed) return planning.NewQueriedTimeRange(timestamp.Time(minT), timestamp.Time(maxT)) } @@ -116,5 +126,8 @@ func (m *MatrixSelector) ExpressionPosition() posrange.PositionRange { } func (m *MatrixSelector) MinimumRequiredPlanVersion() planning.QueryPlanVersion { + if m.Anchored || m.Smoothed { + return planning.QueryPlanV3 + } return planning.QueryPlanVersionZero } diff --git a/pkg/streamingpromql/planning/core/matrix_selector_test.go b/pkg/streamingpromql/planning/core/matrix_selector_test.go index cef911b9790..2cdad8f8e77 100644 --- a/pkg/streamingpromql/planning/core/matrix_selector_test.go +++ b/pkg/streamingpromql/planning/core/matrix_selector_test.go @@ -332,6 +332,144 @@ func TestMatrixSelector_Equivalence(t *testing.T) { }, expectEquivalent: true, }, + "one with smoothed and one without": { + a: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: 
labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: true, + }, + }, + b: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: false, + }, + }, + expectEquivalent: false, + }, + "both smoothed": { + a: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: true, + }, + }, + b: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: true, + }, + }, + expectEquivalent: true, + }, + "neither smoothed": { + a: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: false, + }, + }, + b: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: false, + }, + }, + expectEquivalent: true, + }, + "one with anchored and one without": { + a: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Anchored: true, + }, + }, + b: &MatrixSelector{ + 
MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Anchored: false, + }, + }, + expectEquivalent: false, + }, + "both anchored": { + a: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Anchored: true, + }, + }, + b: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Anchored: true, + }, + }, + expectEquivalent: true, + }, + "neither anchored": { + a: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Anchored: false, + }, + }, + b: &MatrixSelector{ + MatrixSelectorDetails: &MatrixSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchNotEqual, Value: "foo"}, + }, + Range: time.Minute, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Anchored: false, + }, + }, + expectEquivalent: true, + }, } for name, testCase := range testCases { diff --git a/pkg/streamingpromql/planning/core/vector_selector.go b/pkg/streamingpromql/planning/core/vector_selector.go index 67166d5856f..23c929346c9 100644 --- a/pkg/streamingpromql/planning/core/vector_selector.go +++ b/pkg/streamingpromql/planning/core/vector_selector.go @@ -70,7 +70,8 @@ func (v *VectorSelector) EquivalentToIgnoringHintsAndChildren(other planning.Nod slices.EqualFunc(v.Matchers, otherVectorSelector.Matchers, matchersEqual) && ((v.Timestamp == 
nil && otherVectorSelector.Timestamp == nil) || (v.Timestamp != nil && otherVectorSelector.Timestamp != nil && v.Timestamp.Equal(*otherVectorSelector.Timestamp))) && v.Offset == otherVectorSelector.Offset && - v.ReturnSampleTimestamps == otherVectorSelector.ReturnSampleTimestamps + v.ReturnSampleTimestamps == otherVectorSelector.ReturnSampleTimestamps && + v.Smoothed == otherVectorSelector.Smoothed } func (v *VectorSelector) MergeHints(other planning.Node) error { @@ -99,6 +100,7 @@ func MaterializeVectorSelector(v *VectorSelector, _ *planning.Materializer, time SkipHistogramBuckets: v.SkipHistogramBuckets, ExpressionPosition: v.ExpressionPosition(), MemoryConsumptionTracker: params.MemoryConsumptionTracker, + Smoothed: v.Smoothed, } return planning.NewSingleUseOperatorFactory(selectors.NewInstantVectorSelector(selector, params.MemoryConsumptionTracker, v.ReturnSampleTimestamps)), nil @@ -109,8 +111,7 @@ func (v *VectorSelector) ResultType() (parser.ValueType, error) { } func (v *VectorSelector) QueriedTimeRange(queryTimeRange types.QueryTimeRange, lookbackDelta time.Duration) planning.QueriedTimeRange { - minT, maxT := selectors.ComputeQueriedTimeRange(queryTimeRange, TimestampFromTime(v.Timestamp), 0, v.Offset.Milliseconds(), lookbackDelta) - + minT, maxT := selectors.ComputeQueriedTimeRange(queryTimeRange, TimestampFromTime(v.Timestamp), 0, v.Offset.Milliseconds(), lookbackDelta, false, v.Smoothed) return planning.NewQueriedTimeRange(timestamp.Time(minT), timestamp.Time(maxT)) } @@ -119,5 +120,8 @@ func (v *VectorSelector) ExpressionPosition() posrange.PositionRange { } func (v *VectorSelector) MinimumRequiredPlanVersion() planning.QueryPlanVersion { + if v.Smoothed { + return planning.QueryPlanV3 + } return planning.QueryPlanVersionZero } diff --git a/pkg/streamingpromql/planning/core/vector_selector_test.go b/pkg/streamingpromql/planning/core/vector_selector_test.go index b7b2e27eb61..ca687915c83 100644 --- 
a/pkg/streamingpromql/planning/core/vector_selector_test.go +++ b/pkg/streamingpromql/planning/core/vector_selector_test.go @@ -316,6 +316,69 @@ func TestVectorSelector_Equivalence(t *testing.T) { }, expectEquivalent: true, }, + "one with smoothed and one without": { + a: &VectorSelector{ + VectorSelectorDetails: &VectorSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchEqual, Value: "foo"}, + }, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: true, + }, + }, + b: &VectorSelector{ + VectorSelectorDetails: &VectorSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchEqual, Value: "foo"}, + }, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: false, + }, + }, + expectEquivalent: false, + }, + "both smoothed": { + a: &VectorSelector{ + VectorSelectorDetails: &VectorSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchEqual, Value: "foo"}, + }, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: true, + }, + }, + b: &VectorSelector{ + VectorSelectorDetails: &VectorSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchEqual, Value: "foo"}, + }, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: true, + }, + }, + expectEquivalent: true, + }, + "both not smoothed": { + a: &VectorSelector{ + VectorSelectorDetails: &VectorSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchEqual, Value: "foo"}, + }, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: false, + }, + }, + b: &VectorSelector{ + VectorSelectorDetails: &VectorSelectorDetails{ + Matchers: []*LabelMatcher{ + {Name: "__name__", Type: labels.MatchEqual, Value: "foo"}, + }, + ExpressionPosition: PositionRange{Start: 1, End: 2}, + Smoothed: false, + }, + }, + expectEquivalent: true, + }, } for name, testCase := range testCases { diff --git a/pkg/streamingpromql/planning/plan.go 
b/pkg/streamingpromql/planning/plan.go index 2b040872249..8bf0cd08918 100644 --- a/pkg/streamingpromql/planning/plan.go +++ b/pkg/streamingpromql/planning/plan.go @@ -29,7 +29,7 @@ func (v QueryPlanVersion) String() string { return strconv.FormatUint(uint64(v), 10) } -var MaximumSupportedQueryPlanVersion = QueryPlanV1 +var MaximumSupportedQueryPlanVersion = QueryPlanV3 const QueryPlanVersionZero = QueryPlanVersion(0) @@ -38,6 +38,9 @@ const QueryPlanVersionZero = QueryPlanVersion(0) // 2. Step invariant expression node const QueryPlanV1 = QueryPlanVersion(1) +// This version introduces the anchored & smoothed range selector modifiers +const QueryPlanV3 = QueryPlanVersion(3) + type QueryPlan struct { TimeRange types.QueryTimeRange Root Node diff --git a/pkg/streamingpromql/testdata/ours-only/anchored.test b/pkg/streamingpromql/testdata/ours-only/anchored.test new file mode 100644 index 00000000000..ca2b99ab247 --- /dev/null +++ b/pkg/streamingpromql/testdata/ours-only/anchored.test @@ -0,0 +1,8 @@ +# These can be moved into ours/anchored.test once this PR is merged and vendored in - https://github.com/prometheus/prometheus/pull/17479 +load 1m + metric 1 2 _ 4 5 + +eval instant at 2m resets(metric[1m] anchored) + +eval instant at 2m changes(metric[1m] anchored) + diff --git a/pkg/streamingpromql/testdata/ours/anchored.test b/pkg/streamingpromql/testdata/ours/anchored.test new file mode 100644 index 00000000000..c9fdb672a43 --- /dev/null +++ b/pkg/streamingpromql/testdata/ours/anchored.test @@ -0,0 +1,458 @@ +load 15s + metric 1+1x4 9+1x4 + +eval instant at 5s increase(metric[1m]) + +eval instant at 20s increase(metric[1m]) + {} 1.833333333 + +eval instant at 35s increase(metric[1m]) + {} 2.833333333 + +eval instant at 50s increase(metric[1m]) + {} 4 + +eval instant at 65s increase(metric[1m]) + {} 4 + +eval instant at 80s increase(metric[1m]) + {} 8 + +eval instant at 95s increase(metric[1m]) + {} 8 + +eval instant at 110s increase(metric[1m]) + {} 8 + +eval instant 
at 125s increase(metric[1m]) + {} 4 + +eval instant at 5s increase(metric[1m] anchored) + {} 0 + +# The increase = 2-1 = 1 {F: 1, T: -40000}, {F: 1, T: 0}, {F: 2, T: 15000}, {F: 2, T: 20000} +eval instant at 20s increase(metric[1m] anchored) + {} 1 + +eval instant at 35s increase(metric[1m] anchored) + {} 2 + +eval instant at 50s increase(metric[1m] anchored) + {} 3 + +eval instant at 65s increase(metric[1m] anchored) + {} 4 + +eval instant at 80s increase(metric[1m] anchored) + {} 7 + +eval instant at 95s increase(metric[1m] anchored) + {} 7 + +eval instant at 110s increase(metric[1m] anchored) + {} 7 + +eval instant at 125s increase(metric[1m] anchored) + {} 7 + +clear +load 15s + metric 1+1x2 _ _ 9+1x4 + +eval instant at 5s increase(metric[1m]) + +eval instant at 20s increase(metric[1m]) + {} 1.833333333 + +eval instant at 35s increase(metric[1m]) + {} 2.833333333 + +eval instant at 50s increase(metric[1m]) + {} 3.166666666 + +eval instant at 65s increase(metric[1m]) + {} 2.166666666 + +eval instant at 80s increase(metric[1m]) + {} 8 + +eval instant at 95s increase(metric[1m]) + {} 1.833333333 + +eval instant at 110s increase(metric[1m]) + {} 2.833333333 + +eval instant at 125s increase(metric[1m]) + {} 4 + +eval instant at 5s increase(metric[1m] anchored) + {} 0 + +eval instant at 20s increase(metric[1m] anchored) + {} 1 + +eval instant at 35s increase(metric[1m] anchored) + {} 2 + +eval instant at 50s increase(metric[1m] anchored) + {} 2 + +eval instant at 65s increase(metric[1m] anchored) + {} 2 + +eval instant at 80s increase(metric[1m] anchored) + {} 7 + +eval instant at 95s increase(metric[1m] anchored) + {} 7 + +eval instant at 110s increase(metric[1m] anchored) + {} 8 + +eval instant at 125s increase(metric[1m] anchored) + {} 9 + +# Test that inverval is left-open. + +clear +load 1m + metric 1 2 _ 4 5 + +# There is no values within the range of 1m-2m (left-open / right-closed). +# In this case the anchoring does not fill from the extended look-back. 
+eval instant at 2m increase(metric[1m] anchored) + +eval instant at 2m rate(metric[1m] anchored) + +eval instant at 2m delta(metric[1m] anchored) + +# In this case the series is 1, 2, 2 +# The 1 is picked up in the look-back <= 59s, the 2 is in the range, and the 2 is re-used for the end of the range value +eval instant at 2m increase(metric[1m1s] anchored) + {} 1 + +# Basic test with counter resets + +clear +load 1m + metric{id="1"} 1+1x4 1+1x4 + metric{id="2"} 3 2+2x9 + metric{id="3"} 5+3x2 3+3x6 + +# Without the anchored modifier only a single point is returned for the range query +eval instant at 1m30s metric[1m] + expect range vector from 1m to 1m step 0 + {__name__="metric", id="1"} 2 + {__name__="metric", id="2"} 2 + {__name__="metric", id="3"} 8 + +# Since there is only a single point in the range, the increase can not return a value +eval instant at 1m30s increase(metric[1m]) + +# When the anchor modifier is used, the range query will return the following; +# id=1 - {F: 1, T: 30000}, {F: 2, T: 60000}, {F: 2, T: 90000} +# id=2 - {F: 3, T: 30000}, {F: 2, T: 60000}, {F: 2, T: 90000} +# id=3 - {F: 5, T: 30000}, {F: 8, T: 60000}, {F: 8, T: 90000} +# Note that for id=2, the value goes down and this is series is treated as a counter +eval instant at 1m30s increase(metric[1m] anchored) + {id="1"} 1 + {id="2"} 2 + {id="3"} 3 + +# Since there is only a single point in the range, the delta can not return a value +eval instant at 1m30s delta(metric[1m]) + +eval instant at 1m30s delta(metric[1m] anchored) + {id="1"} 1 + {id="2"} -1 + {id="3"} 3 + +# Since there is only a single point in the range, the rate can not return a value +eval instant at 1m30s rate(metric[1m]) + +eval instant at 1m30s rate(metric[1m] anchored) + {id="1"} 0.016666666666666666 + {id="2"} 0.03333333333333333 + {id="3"} 0.05 + +# Since there is only a single point in the range, the resets tally is 0 +eval instant at 1m30s resets(metric[1m]) + {id="1"} 0 + {id="2"} 0 + {id="3"} 0 + +eval instant at 
1m30s resets(metric[1m] anchored) + {id="1"} 0 + {id="2"} 1 + {id="3"} 0 + +eval instant at 3m delta(metric[1m] anchored) + {id="1"} 1 + {id="2"} 2 + {id="3"} -8 + +eval instant at 3m increase(metric[1m] anchored) + {id="1"} 1 + {id="2"} 2 + {id="3"} 3 + +eval instant at 3m resets(metric[1m] anchored) + {id="1"} 0 + {id="2"} 0 + {id="3"} 1 + +eval instant at 3m changes(metric[1m] anchored) + {id="1"} 1 + {id="2"} 1 + {id="3"} 1 + +eval instant at 3m rate(metric[1m] anchored) + {id="1"} 0.016666666666666666 + {id="2"} 0.03333333333333333 + {id="3"} 0.05 + +eval instant at 3m30s delta(metric[1m] anchored) + {id="1"} 1 + {id="2"} 2 + {id="3"} -8 + +eval instant at 6m increase(metric[5m]) + {id="1"} 5 + {id="2"} 10 + {id="3"} 15 + +eval instant at 5m increase(metric[5m] anchored) + {id="1"} 5 + {id="2"} 10 + {id="3"} 15 + +eval instant at 15m increase(metric[5m] anchored) + +clear +load 1m + metric{id="1"} 1+1x10 + metric{id="2"} 1 1+1x10 + metric{id="3"} 99-1x10 + metric{id="4"} 99 99-1x10 + +eval instant at 5m changes(metric[5m]) + {id="1"} 4 + {id="2"} 4 + {id="3"} 4 + {id="4"} 4 + +eval instant at 5m30s changes(metric[5m]) + {id="1"} 4 + {id="2"} 4 + {id="3"} 4 + {id="4"} 4 + +eval instant at 5m0s changes(metric[5m] anchored) + {id="1"} 5 + {id="2"} 4 + {id="3"} 5 + {id="4"} 4 + +eval instant at 6m changes(metric[5m] anchored) + {id="1"} 5 + {id="2"} 5 + {id="3"} 5 + {id="4"} 5 + +eval instant at 5m30s changes(metric[5m] anchored) + {id="1"} 5 + {id="2"} 4 + {id="3"} 5 + {id="4"} 4 + +eval instant at 5m30s resets(metric[5m]) + {id="1"} 0 + {id="2"} 0 + {id="3"} 4 + {id="4"} 4 + +eval instant at 5m30s resets(metric[5m] anchored) + {id="1"} 0 + {id="2"} 0 + {id="3"} 5 + {id="4"} 4 + +clear +load 1m + metric{id="1"} 2 _ 1 _ _ _ _ _ 0 + metric{id="2"} 99-1x10 + +eval instant at 2m changes(metric[1m]) + {id="1"} 0 + {id="2"} 0 + +eval instant at 3m changes(metric[1m]) + {id="2"} 0 + +eval instant at 2m changes(metric[1m] anchored) + {id="1"} 1 + {id="2"} 1 + +# I dont 
think this is correct as metric{id="1"}[1m] anchored returns an empty matrix +#eval instant at 3m changes(metric[1m] anchored) +# {id="1"} 1 +# {id="2"} 1 + +# {F: 0, T: 420000}, {F: 0, T: 480000} +eval instant at 8m changes(metric[1m] anchored) + {id="1"} 0 + {id="2"} 1 + +# {F: 1, T: 419999}, {F: 0, T: 480000} +eval instant at 8m changes(metric[1m1ms] anchored) + {id="1"} 1 + {id="2"} 2 + +eval instant at 2m resets(metric[1m]) + {id="1"} 0 + {id="2"} 0 + +eval instant at 3m resets(metric[1m]) + {id="2"} 0 + +eval instant at 2m resets(metric[1m] anchored) + {id="1"} 1 + {id="2"} 1 + +eval instant at 3m metric[1m] anchored + expect range vector from 2m to 3m step 1m + {__name__="metric", id="2"} 97 96 + +# I dont think this is correct as metric{id="1"}[1m] anchored returns an empty matrix - see above +#eval instant at 3m resets(metric[1m] anchored) +# {id="1"} 1 +# {id="2"} 1 + +eval instant at 8m resets(metric[1m] anchored) + {id="1"} 0 + {id="2"} 1 + +eval instant at 8m resets(metric[1m1ms] anchored) + {id="1"} 1 + {id="2"} 2 + +clear + +eval instant at 1m resets(foo[3m] anchored) + +eval instant at 1m changes(foo[3m] anchored) + +clear + +load 1m + mixed 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 4 5 6 7+1x60 + +eval instant at 1m resets(mixed[1m] anchored) + {} 0 + +eval instant at 3m resets(mixed[1m] anchored) + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 4m resets(mixed[1m] anchored) + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 5m resets(mixed[1m] anchored) + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +# Whilst the histogram is within the range + look-back window the query will fail +eval instant at 8m resets(mixed[1m] anchored) + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 10m resets(mixed[1m] anchored) + {} 0 + +clear + +load 1m + 
metric 1 2 3 NaN -NaN 4 5 6 + metric_inf 1 2 3 Inf Inf 4 5 6 + +eval instant at 1m resets(metric[1m] anchored) + {} 0 + +eval instant at 3m resets(metric[1m] anchored) + {} 0 + +eval instant at 4m resets(metric[1m] anchored) + {} 0 + +eval instant at 2m metric[2m] anchored + expect range vector from 0 to 2m step 1m + {__name__="metric"} 1 2 3 + +eval instant at 3m metric[2m] anchored + expect range vector from 1m to 3m step 1m + {__name__="metric"} 2 3 NaN + +eval instant at 4m metric[2m] anchored + expect range vector from 2m to 4m step 1m + {__name__="metric"} 3 NaN NaN + +eval instant at 5m metric[2m] anchored + expect range vector from 3m to 5m step 1m + {__name__="metric"} NaN NaN 4 + +eval instant at 6m metric[2m] anchored + expect range vector from 4m to 6m step 1m + {__name__="metric"} NaN 4 5 + +eval instant at 2m metric_inf[2m] anchored + expect range vector from 0 to 2m step 1m + {__name__="metric_inf"} 1 2 3 + +eval instant at 3m metric_inf[2m] anchored + expect range vector from 1m to 3m step 1m + {__name__="metric_inf"} 2 3 Inf + +eval instant at 4m metric_inf[2m] anchored + expect range vector from 2m to 4m step 1m + {__name__="metric_inf"} 3 Inf Inf + +eval instant at 5m metric_inf[2m] anchored + expect range vector from 3m to 5m step 1m + {__name__="metric_inf"} Inf Inf 4 + +eval instant at 6m metric_inf[2m] anchored + expect range vector from 4m to 6m step 1m + {__name__="metric_inf"} Inf 4 5 + +clear + +load 1m + metric 1+1x10000 + +eval instant at 30m metric[20m] anchored + expect range vector from 10m to 30m step 1m + {__name__="metric"} 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + +eval range from 0 to 10m step 1m increase(metric[1m] anchored) + {} 0 1 1 1 1 1 1 1 1 1 1 + +eval range from 0 to 10m step 1m increase(metric[1m2s] anchored) + {} 0 1 2 2 2 2 2 2 2 2 2 + +clear +load 1m + metric{id="1"} 11 -1 100 0 + metric{id="2"} 0 0 100 0 0 11 -1 + +eval instant at 5m30s delta(metric[5m] anchored) + {id="1"} -11 + {id="2"} 11 
+ +eval instant at 5m45s delta(metric[5m] anchored) + {id="1"} -11 + {id="2"} 11 + +eval instant at 5m30s rate(metric[5m] anchored) + {id="1"} 0.3333333333333333 + {id="2"} 0.37 + +eval instant at 5m45s rate(metric[5m] anchored) + {id="1"} 0.3333333333333333 + {id="2"} 0.37 \ No newline at end of file diff --git a/pkg/streamingpromql/testdata/ours/smoothed.test b/pkg/streamingpromql/testdata/ours/smoothed.test new file mode 100644 index 00000000000..1ff37f8d9ca --- /dev/null +++ b/pkg/streamingpromql/testdata/ours/smoothed.test @@ -0,0 +1,681 @@ +load 1m + metric{id="1"} 0 1+1x60 _ _ _ _ _ _ 2 + metric{id="2"} 0 1+1x60 _ _ _ _ _ 2 + +eval instant at 59m metric smoothed + metric{id="1"} 59 + metric{id="2"} 59 + +eval instant at 60m metric smoothed + metric{id="1"} 60 + metric{id="2"} 60 + +eval instant at 61m metric smoothed + metric{id="1"} 61 + metric{id="2"} 61 + +eval instant at 62m metric smoothed + metric{id="1"} 61 + metric{id="2"} 51.166666666666664 + +eval instant at 63m metric smoothed + metric{id="1"} 44.14285714285714 + metric{id="2"} 41.33333333333333 + +clear + +# This test exercises the handling of the smoothed head and tail points +load 1s + metric _ 1 2 3 4 _ 6 7 8 9 10 + +# This query will have a nil smoothedHead and smoothed tail +eval instant at 1s increase(metric[1s] smoothed) + {} 0 + +# This query will have a nil smoothedHead, but a value for the smoothedTail +eval instant at 5s increase(metric[1s] smoothed) + {} 1 + +# This query will have a value for smoothedHead, but a nil for the smoothedTail +eval instant at 6s increase(metric[1s] smoothed) + {} 1 + +# This query will have a nil smoothedHead and smoothed tail +eval instant at 1s rate(metric[1s] smoothed) + {} 0 + +# This query will have a nil smoothedHead, but a value for the smoothedTail +eval instant at 5s rate(metric[1s] smoothed) + {} 1 + +# This query will have a value for smoothedHead, but a nil for the smoothedTail +eval instant at 6s rate(metric[1s] smoothed) + {} 1 + +# This 
query will have a nil smoothedHead and smoothed tail +eval instant at 1s delta(metric[1s] smoothed) + {} 0 + +# This query will have a nil smoothedHead, but a value for the smoothedTail +eval instant at 5s delta(metric[1s] smoothed) + {} 1 + +# This query will have a value for smoothedHead, but a nil for the smoothedTail +eval instant at 6s delta(metric[1s] smoothed) + {} 1 + +clear +load 15s + metric 1+1x4 9+1x4 + +eval instant at 5s increase(metric[1m] smoothed) + {} 0.333333333 + +eval instant at 20s increase(metric[1m] smoothed) + {} 1.333333333 + +eval instant at 35s increase(metric[1m] smoothed) + {} 2.333333333 + +eval instant at 50s increase(metric[1m] smoothed) + {} 3.333333333 + +eval instant at 65s increase(metric[1m] smoothed) + {} 5 + +eval instant at 80s increase(metric[1m] smoothed) + {} 7 + +eval instant at 95s increase(metric[1m] smoothed) + {} 7 + +eval instant at 110s increase(metric[1m] smoothed) + {} 7 + +eval instant at 125s increase(metric[1m] smoothed) + {} 6 + +eval instant at 5s rate(metric[1m] smoothed) + {} 0.005555555555555554 + +eval instant at 20s rate(metric[1m] smoothed) + {} 0.022222222222222223 + +eval instant at 35s rate(metric[1m] smoothed) + {} 0.03888888888888889 + +eval instant at 50s rate(metric[1m] smoothed) + {} 0.05555555555555555 + +eval instant at 65s rate(metric[1m] smoothed) + {} 0.08333333333333333 + +eval instant at 80s rate(metric[1m] smoothed) + {} 0.11666666666666667 + +eval instant at 95s rate(metric[1m] smoothed) + {} 0.11666666666666667 + +eval instant at 110s rate(metric[1m] smoothed) + {} 0.11666666666666668 + +eval instant at 125s rate(metric[1m] smoothed) + {} 0.10000000000000002 + +eval instant at 5s delta(metric[1m] smoothed) + {} 0.333333333 + +eval instant at 20s delta(metric[1m] smoothed) + {} 1.333333333 + +eval instant at 35s delta(metric[1m] smoothed) + {} 2.333333333 + +eval instant at 50s delta(metric[1m] smoothed) + {} 3.333333333 + +eval instant at 65s delta(metric[1m] smoothed) + {} 5 + +eval 
instant at 80s delta(metric[1m] smoothed) + {} 7 + +eval instant at 95s delta(metric[1m] smoothed) + {} 7 + +eval instant at 110s delta(metric[1m] smoothed) + {} 7 + +eval instant at 125s delta(metric[1m] smoothed) + {} 6 + +clear +load 15s + metric 1+1x2 _ _ 9+1x4 + +eval instant at 5s increase(metric[1m] smoothed) + {} 0.333333333 + +eval instant at 20s increase(metric[1m] smoothed) + {} 1.333333333 + +eval instant at 35s increase(metric[1m] smoothed) + {} 2.666666666 + +eval instant at 50s increase(metric[1m] smoothed) + {} 4.666666666 + +eval instant at 65s increase(metric[1m] smoothed) + {} 6.333333333 + +eval instant at 80s increase(metric[1m] smoothed) + {} 7 + +eval instant at 95s increase(metric[1m] smoothed) + {} 6.666666666 + +eval instant at 110s increase(metric[1m] smoothed) + {} 5.666666666 + +eval instant at 125s increase(metric[1m] smoothed) + {} 4.666666666 + +eval instant at 5s rate(metric[1m] smoothed) + {} 0.005555555555555554 + +eval instant at 20s rate(metric[1m] smoothed) + {} 0.022222222222222223 + +eval instant at 35s rate(metric[1m] smoothed) + {} 0.04444444444444444 + +eval instant at 50s rate(metric[1m] smoothed) + {} 0.07777777777777777 + +eval instant at 65s rate(metric[1m] smoothed) + {} 0.10555555555555557 + +eval instant at 80s rate(metric[1m] smoothed) + {} 0.11666666666666667 + +eval instant at 95s rate(metric[1m] smoothed) + {} 0.11111111111111113 + +eval instant at 110s rate(metric[1m] smoothed) + {} 0.09444444444444447 + +eval instant at 125s rate(metric[1m] smoothed) + {} 0.07777777777777778 + +# Test that interval is left-open. 
+ +clear +load 1m + metric 1 2 _ 4 5 + +eval instant at 2m increase(metric[1m] smoothed) + {} 1 + +# Basic test with counter resets + +clear +load 1m + metric{id="1"} 1+1x4 1+1x4 + metric{id="2"} 3 2+2x9 + metric{id="3"} 5+3x2 3+3x6 + +eval instant at 1m30s increase(metric[1m] smoothed) + {id="1"} 1 + {id="2"} 2 + {id="3"} 3 + +eval instant at 6m15s increase(metric[5m] smoothed) + {id="1"} 5 + {id="2"} 10 + {id="3"} 15 + +eval instant at 6m increase(metric[5m] smoothed) + {id="1"} 5 + {id="2"} 10 + {id="3"} 15 + +eval instant at 1m30s rate(metric[1m] smoothed) + {id="1"} 0.016666666666666666 + {id="2"} 0.03333333333333333 + {id="3"} 0.05 + +eval instant at 6m15s rate(metric[5m] smoothed) + {id="1"} 0.016666666666666666 + {id="2"} 0.03333333333333333 + {id="3"} 0.05 + +eval instant at 6m rate(metric[5m] smoothed) + {id="1"} 0.016666666666666666 + {id="2"} 0.03333333333333333 + {id="3"} 0.05 + +clear +load 1m + metric{id="1"} 11 -1 100 0 + metric{id="2"} 0 0 100 0 0 11 -1 + +eval instant at 5m30s delta(metric[5m] smoothed) + {id="1"} -5 + {id="2"} 5 + +eval instant at 5m45s delta(metric[5m] smoothed) + {id="1"} -2 + {id="2"} 2 + +eval instant at 5m30s rate(metric[5m] smoothed) + {id="1"} 0.3333333333333333 + {id="2"} 0.405 + +eval instant at 5m45s rate(metric[5m] smoothed) + {id="1"} 0.3333333333333333 + {id="2"} 0.4041666666666667 + +clear +load 1m + metric 9 8 5 4 + +eval instant at 2m15s increase(metric[2m] smoothed) + {} 12 + +clear +load 10s + metric 1+1x10 + withreset 1+1x4 1+1x5 + notregular 0 5 100 2 8 + +eval instant at 10s metric smoothed + metric 2 + +eval instant at 15s metric smoothed + metric 2.5 + +eval instant at 5s metric smoothed + metric 1.5 + +eval instant at 105s metric smoothed + metric 11 + +eval instant at 45s withreset smoothed + withreset 3 + +eval instant at 30s notregular smoothed + notregular 2 + +clear +load 1s + mixed 0 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 2 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 4 {{schema:0 sum:5 count:4 
buckets:[1 2 1]}} 6 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} + missing 0 _ 2 _ _ 5 _ _ _ _ _ + nans 0 NaN 2 NaN NaN 5 NaN NaN NaN NaN NaN + infs 0 Inf 2 Inf Inf 5 Inf Inf Inf Inf Inf + +eval instant at 1s mixed smoothed + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 8s mixed smoothed + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 0 missing smoothed + missing 0 + +eval instant at 1s missing smoothed + missing 1 + +eval instant at 4s missing smoothed + missing 4 + +eval instant at 8s missing smoothed + missing 5 + +eval instant at 10s missing smoothed + missing 5 + +eval instant at 30s missing smoothed + missing 5 + +eval instant at 30m missing smoothed + +eval instant at 0 nans smoothed + nans 0 + +eval instant at 1s nans smoothed + nans NaN + +eval instant at 2s nans smoothed + nans 2 + +eval instant at 4s nans smoothed + nans NaN + +eval instant at 8s nans smoothed + nans NaN + +eval instant at 10s nans smoothed + nans NaN + +eval instant at 30s nans smoothed + nans NaN + +eval instant at 30m nans smoothed + +eval instant at 0 infs smoothed + infs 0 + +eval instant at 1s infs smoothed + infs Inf + +eval instant at 2s infs smoothed + infs 2 + +eval instant at 4s infs smoothed + infs Inf + +eval instant at 8s infs smoothed + infs Inf + +eval instant at 10s infs smoothed + infs Inf + +eval instant at 30s infs smoothed + infs Inf + +eval instant at 30m infs smoothed + +clear +load 1s + zeros 0 0 _ 0 0 + const 1 1 _ 1 1 + inc 0 1 2 _ 4 5 + dec 5 4 3 _ 1 0 + var 2 1 3 _ 4 3 + +eval instant at 0s zeros smoothed + zeros 0 + +eval instant at 1s zeros smoothed + zeros 0 + +eval instant at 2s zeros smoothed + zeros 0 + +eval instant at 3s zeros smoothed + zeros 0 + +eval instant at 4s zeros smoothed + zeros 0 + +eval instant at 0s const smoothed + const 1 + +eval instant at 1s const smoothed + const 1 + +eval instant at 2s const smoothed + const 1 + +eval 
instant at 3s const smoothed + const 1 + +eval instant at 4s const smoothed + const 1 + +eval instant at 0s inc smoothed + inc 0 + +eval instant at 1s inc smoothed + inc 1 + +eval instant at 2s inc smoothed + inc 2 + +eval instant at 3s inc smoothed + inc 3 + +eval instant at 4s inc smoothed + inc 4 + +eval instant at 0s dec smoothed + dec 5 + +eval instant at 1s dec smoothed + dec 4 + +eval instant at 2s dec smoothed + dec 3 + +eval instant at 3s dec smoothed + dec 2 + +eval instant at 4s dec smoothed + dec 1 + +eval instant at 0s var smoothed + var 2 + +eval instant at 1s var smoothed + var 1 + +eval instant at 2s var smoothed + var 3 + +eval instant at 3s var smoothed + var 3.5 + +eval instant at 4s var smoothed + var 4 + +clear +load 1s + metric{instance="1"} 1+1x10 _ _ 10-1x10 + metric{instance="2"} 1 5 6 3 1 8 7 3 0 19 NaN NaN 10-1x10 + metric{instance="3"} 0x10 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 10-1x10 + +eval instant at 0 increase(metric[1m] smoothed) + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 10m increase(metric[1s] smoothed) + +eval instant at 0 increase(metric{instance="1"}[1m] smoothed) + {instance="1"} 0 + +eval instant at 0 increase(metric{instance="2"}[1m] smoothed) + {instance="2"} 0 + +eval instant at 1m40s increase(metric{instance="1"}[1m] smoothed) + +eval instant at 1m40s increase(metric{instance="2"}[1m] smoothed) + +eval instant at 2m increase(metric{instance="1"}[1m] smoothed) + +eval instant at 2m increase(metric{instance="2"}[1m] smoothed) + +eval instant at 3m increase(metric{instance="1"}[10s] smoothed) + +eval instant at 3m increase(metric{instance="2"}[10s] smoothed) + +eval instant at 5m increase(metric{instance="1"}[1m] smoothed) + +eval instant at 5m increase(metric{instance="2"}[1m] smoothed) + +eval instant at 0 rate(metric[1m] smoothed) + expect fail msg: smoothed and anchored modifiers do not work with native 
histograms + +eval instant at 10m rate(metric[1s] smoothed) + +eval instant at 0 rate(metric{instance="1"}[1m] smoothed) + {instance="1"} 0 + +eval instant at 0 rate(metric{instance="2"}[1m] smoothed) + {instance="2"} 0 + +eval instant at 1m40s rate(metric{instance="1"}[1m] smoothed) + +eval instant at 1m40s rate(metric{instance="2"}[1m] smoothed) + +eval instant at 2m rate(metric{instance="1"}[1m] smoothed) + +eval instant at 2m rate(metric{instance="2"}[1m] smoothed) + +eval instant at 3m rate(metric{instance="1"}[10s] smoothed) + +eval instant at 3m rate(metric{instance="2"}[10s] smoothed) + +eval instant at 5m rate(metric{instance="1"}[1m] smoothed) + +eval instant at 5m rate(metric{instance="2"}[1m] smoothed) + +eval instant at 0 delta(metric[1m] smoothed) + expect fail msg: smoothed and anchored modifiers do not work with native histograms + +eval instant at 10m delta(metric[1s] smoothed) + +eval instant at 0 delta(metric{instance="1"}[1m] smoothed) + {instance="1"} 0 + +eval instant at 0 delta(metric{instance="2"}[1m] smoothed) + {instance="2"} 0 + +eval instant at 1m40s delta(metric{instance="1"}[1m] smoothed) + +eval instant at 1m40s delta(metric{instance="2"}[1m] smoothed) + +eval instant at 2m delta(metric{instance="1"}[1m] smoothed) + +eval instant at 2m delta(metric{instance="2"}[1m] smoothed) + +eval instant at 3m delta(metric{instance="1"}[10s] smoothed) + +eval instant at 3m delta(metric{instance="2"}[10s] smoothed) + +eval instant at 5m delta(metric{instance="1"}[1m] smoothed) + +eval instant at 5m delta(metric{instance="2"}[1m] smoothed) + +clear +load 1m + metric{instance="1"} 1+1x10 _ _ 10-1x1000 + metric{instance="2"} 1 5 6 3 1 8 7 3 0 19 18 17 10-1x1000 + +eval instant at 0 increase(metric[1m] smoothed) + {instance="1"} 0 + {instance="2"} 0 + +eval instant at 30s increase(metric[1m] smoothed) + {instance="1"} 0.5 + {instance="2"} 2 + +eval instant at 59s increase(metric[1m] smoothed) + {instance="1"} 0.9833333333333334 + {instance="2"} 
3.9333333333333336 + +eval instant at 60s increase(metric[1m] smoothed) + {instance="1"} 1 + {instance="2"} 4 + +eval instant at 61s increase(metric[1m] smoothed) + {instance="1"} 1 + {instance="2"} 3.95 + +eval instant at 69s increase(metric[1m] smoothed) + {instance="1"} 1 + {instance="2"} 3.5500000000000003 + +eval instant at 98s increase(metric[1m1s] smoothed) + {instance="1"} 1.0166666666666666 + {instance="2"} 2.166666666666666 + +eval instant at 100m increase(metric[10m38s] smoothed) + {instance="1"} -792 + {instance="2"} -803 + +eval instant at 100m increase(metric[100m] smoothed) + {instance="1"} -2938 + {instance="2"} -2946 + +eval instant at 100m increase(metric[500m] smoothed) + {instance="1"} -2938 + {instance="2"} -2946 + +eval instant at 0 rate(metric[1m] smoothed) + {instance="1"} 0 + {instance="2"} 0 + +eval instant at 30s rate(metric[1m] smoothed) + {instance="1"} 0.008333333333333333 + {instance="2"} 0.03333333333333333 + +eval instant at 59s rate(metric[1m] smoothed) + {instance="1"} 0.01638888888888889 + {instance="2"} 0.06555555555555556 + +eval instant at 60s rate(metric[1m] smoothed) + {instance="1"} 0.016666666666666666 + {instance="2"} 0.06666666666666667 + +eval instant at 61s rate(metric[1m] smoothed) + {instance="1"} 0.016666666666666666 + {instance="2"} 0.06583333333333334 + +eval instant at 69s rate(metric[1m] smoothed) + {instance="1"} 0.016666666666666666 + {instance="2"} 0.05916666666666667 + +eval instant at 98s rate(metric[1m1s] smoothed) + {instance="1"} 0.016666666666666666 + {instance="2"} 0.0355191256830601 + +eval instant at 100m rate(metric[10m38s] smoothed) + {instance="1"} -1.2413793103448276 + {instance="2"} -1.2586206896551724 + +eval instant at 100m rate(metric[100m] smoothed) + {instance="1"} -0.48966666666666664 + {instance="2"} -0.491 + +eval instant at 100m rate(metric[500m] smoothed) + {instance="1"} -0.09793333333333333 + {instance="2"} -0.0982 + +eval instant at 0 delta(metric[1m] smoothed) + {instance="1"} 0 + 
{instance="2"} 0 + +eval instant at 30s delta(metric[1m] smoothed) + {instance="1"} 0.5 + {instance="2"} 2 + +eval instant at 59s delta(metric[1m] smoothed) + {instance="1"} 0.9833333333333334 + {instance="2"} 3.9333333333333336 + +eval instant at 60s delta(metric[1m] smoothed) + {instance="1"} 1 + {instance="2"} 4 + +eval instant at 61s delta(metric[1m] smoothed) + {instance="1"} 1 + {instance="2"} 3.95 + +eval instant at 69s delta(metric[1m] smoothed) + {instance="1"} 1 + {instance="2"} 3.5500000000000003 + +eval instant at 98s increase(metric[1m1s] smoothed) + {instance="1"} 1.0166666666666666 + {instance="2"} 2.166666666666666 + +eval instant at 100m delta(metric[10m38s] smoothed) + {instance="1"} -10.63333333333334 + {instance="2"} -10.63333333333334 + +eval instant at 100m delta(metric[100m] smoothed) + {instance="1"} -78 + {instance="2"} -79 + +eval instant at 100m delta(metric[500m] smoothed) + {instance="1"} -78 + {instance="2"} -79 \ No newline at end of file diff --git a/pkg/streamingpromql/testdata/upstream/extended_vectors.test.disabled b/pkg/streamingpromql/testdata/upstream/extended_vectors.test similarity index 100% rename from pkg/streamingpromql/testdata/upstream/extended_vectors.test.disabled rename to pkg/streamingpromql/testdata/upstream/extended_vectors.test diff --git a/pkg/streamingpromql/types/data.go b/pkg/streamingpromql/types/data.go index a8815c0423d..3444eca5054 100644 --- a/pkg/streamingpromql/types/data.go +++ b/pkg/streamingpromql/types/data.go @@ -155,6 +155,23 @@ type RangeVectorStepData struct { // produced by the query. // RangeEnd is inclusive (ie. points with timestamp <= RangeEnd are included in the range). 
RangeEnd int64 + + // Anchored is set to true when the anchored modifier has been requested on a range query + Anchored bool + + // Smoothed is set to true when the smoothed modifier has been requested on a range query + Smoothed bool + + // SmoothedBasisForHeadPoint and SmoothedBasisForTailPoint are set when a smoothed modifier has been applied to a range vector selector. + // They are derived points using samples from outside the range and can not be re-calculated from Floats. + // Either can be nil if there was no point within the lookback window immediately before or after the range, respectively. + // + // When the smoothed range is used by a rate/increase function the points on the range boundaries + // are calculated differently to accommodate counter arithmetic for the derived values spanning the boundary. + // To avoid needing to re-calculate these alternate points they are included here for the rate/increase function + // handler to substitute in. + SmoothedBasisForHeadPoint *promql.FPoint + SmoothedBasisForTailPoint *promql.FPoint } type ScalarData struct { diff --git a/pkg/streamingpromql/types/fpoint_ring_buffer.go b/pkg/streamingpromql/types/fpoint_ring_buffer.go index 62e06834a4d..7ba7cc8e0b0 100644 --- a/pkg/streamingpromql/types/fpoint_ring_buffer.go +++ b/pkg/streamingpromql/types/fpoint_ring_buffer.go @@ -106,6 +106,16 @@ func (b *FPointRingBuffer) ViewUntilSearchingForwards(maxT int64, existing *FPoi return existing } + +// ViewAll returns a view which includes all points in the ring buffer. +// The returned view is no longer valid if this buffer is modified (eg. a point is added, or the buffer is reset or closed). 
+func (b *FPointRingBuffer) ViewAll(existing *FPointRingBufferView) *FPointRingBufferView { + if existing == nil { + existing = &FPointRingBufferView{buffer: b} + } + existing.size = b.size + return existing +} + // ViewUntilSearchingBackwards is like ViewUntilSearchingForwards, except it examines the points from the end of the buffer, so // is preferred over ViewUntilSearchingForwards if it is expected that only a few of the points will have timestamp greater than maxT. func (b *FPointRingBuffer) ViewUntilSearchingBackwards(maxT int64, existing *FPointRingBufferView) *FPointRingBufferView { @@ -294,6 +304,142 @@ func (v FPointRingBufferView) Clone() (*FPointRingBufferView, *FPointRingBuffer, return view, buffer, nil } +func (v FPointRingBufferView) Iterator(existing *FPointRingBufferViewIterator) *FPointRingBufferViewIterator { + if existing == nil { + existing = &FPointRingBufferViewIterator{} + } + existing.idx = 0 + existing.view = &v + return existing +} + +// FPointRingBufferViewIterator is an iterator which can be used over a FPointRingBufferView +type FPointRingBufferViewIterator struct { + idx int + view *FPointRingBufferView +} + +//func NewFPointRingBufferViewIterator(view *FPointRingBufferView) *FPointRingBufferViewIterator { +// return &FPointRingBufferViewIterator{view: view} +//} + +func (i *FPointRingBufferViewIterator) Count() int { + return i.view.Count() +} + +func (i *FPointRingBufferViewIterator) HasNext() bool { + return i.idx < i.view.Count() +} + +// Next moves the iterator forward, returning the next point. +// This function will panic if moving the iterator would result in an index out of bounds. 
+func (i *FPointRingBufferViewIterator) Next() promql.FPoint { + if i.idx >= i.view.Count() { + panic(fmt.Sprintf("next(): out of range, requested index %v but have length %v", i.idx, i.view.Count())) + } + p := i.view.PointAt(i.idx) + i.idx++ + return p +} + +func (i *FPointRingBufferViewIterator) At() promql.FPoint { + return i.view.PointAt(i.idx) +} + +// Prev moves the iterator backwards, returning the previous point. +// This function will panic if moving the iterator would result in an index out of bounds. +func (i *FPointRingBufferViewIterator) Prev() promql.FPoint { + if i.idx <= 0 { + panic(fmt.Sprintf("prev(): out of range, requested index %v", i.idx-1)) + } + i.idx-- + return i.view.PointAt(i.idx) +} + +// advance will move the iterator forward. +func (i *FPointRingBufferViewIterator) advance() { + if i.idx < i.view.Count() { + i.idx++ + } +} + +// reverse will move the iterator backwards. +func (i *FPointRingBufferViewIterator) reverse() { + if i.idx > 0 { + i.idx-- + } +} + +// Peek returns the next point, but does not move the iterator forward. +// This function will panic if this look ahead would result in an index out of bounds. +func (i *FPointRingBufferViewIterator) Peek() promql.FPoint { + if i.idx >= i.view.Count() { + panic(fmt.Sprintf("peek(): out of range, requested index %v but have length %v", i.idx, i.view.Count())) + } + return i.view.PointAt(i.idx) +} + +// Seek will return the point which is closest to being <= time, or the first point after this time. +// The iterator will be positioned to the first point > time. 
+func (i *FPointRingBufferViewIterator) Seek(time int64) promql.FPoint { + var first promql.FPoint + ok := false + + for i.HasNext() { + next := i.Peek() + + if next.T < time { + first = next + ok = true + i.advance() + continue + } + + if next.T == time { + i.advance() + return next + } + + if ok { + return first + } + + return next + } + + if !ok { + panic(fmt.Sprintf("seek(): out of range - no record found")) + } + return first +} + +// CopyRemainingPointsTo will accumulate all points <= time into the given buff. +// The iterator will be positioned at the first point which is >= time. +// If there is no point >= time, then the iterator is positioned at the last point < time. +func (i *FPointRingBufferViewIterator) CopyRemainingPointsTo(time int64, buff []promql.FPoint) []promql.FPoint { + for i.HasNext() { + next := i.Next() + + if next.T <= time { + buff = append(buff, next) + + if next.T == time { + // move the iterator back so that the at() call will return this 'next' point + i.reverse() + return buff + } + + } else { + // This is the first point to be > time + break + } + } + + // move the iterator back so that the at() call will return this last point which caused our loop to exit + i.reverse() + return buff +} + // These hooks exist so we can override them during unit tests. 
var getFPointSliceForRingBuffer = FPointSlicePool.Get var putFPointSliceForRingBuffer = FPointSlicePool.Put diff --git a/pkg/streamingpromql/types/ring_buffer_test.go b/pkg/streamingpromql/types/ring_buffer_test.go index af6939cf3e9..0064fb8b26c 100644 --- a/pkg/streamingpromql/types/ring_buffer_test.go +++ b/pkg/streamingpromql/types/ring_buffer_test.go @@ -563,3 +563,133 @@ func setupRingBufferTestingPools(t *testing.T) { putHPointSliceForRingBuffer = originalPutHPointSlice }) } + +func TestFPointRingBufferEmptySet(t *testing.T) { + floats := NewFPointRingBuffer(&limiter.MemoryConsumptionTracker{}) + require.NoError(t, floats.Use([]promql.FPoint{})) + it := floats.ViewAll(nil).Iterator(nil) + require.False(t, it.HasNext()) + it.advance() + it.advance() + require.False(t, it.HasNext()) +} + +func TestFPointRingBufferSinglePoint(t *testing.T) { + floats := NewFPointRingBuffer(&limiter.MemoryConsumptionTracker{}) + require.NoError(t, floats.Use([]promql.FPoint{{T: 0, F: 1}})) + it := floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Peek()) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Next()) + require.False(t, it.HasNext()) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Prev()) +} + +func TestFPointRingBufferDoublePoint(t *testing.T) { + floats := NewFPointRingBuffer(&limiter.MemoryConsumptionTracker{}) + require.NoError(t, floats.Use([]promql.FPoint{{T: 0, F: 1}, {T: 1, F: 1}})) + it := floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Peek()) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Next()) + require.Equal(t, promql.FPoint{T: 1, F: 1}, it.Peek()) + require.Equal(t, promql.FPoint{T: 1, F: 1}, it.Next()) + require.False(t, it.HasNext()) +} + +func TestFPointRingBufferAdvance(t *testing.T) { + floats := NewFPointRingBuffer(&limiter.MemoryConsumptionTracker{}) + require.NoError(t, floats.Use([]promql.FPoint{{T: 0, F: 1}, {T: 1, F: 1}, {T: 2, F: 1}, {T: 3, F: 1}, {T: 4, F: 1}, {T: 5, F: 
1}, {T: 6, F: 1}, {T: 7, F: 1}})) + it := floats.ViewAll(nil).Iterator(nil) + i := 0 + for i < 8 { + require.Equal(t, promql.FPoint{T: int64(i), F: 1}, it.Peek()) + require.Equal(t, promql.FPoint{T: int64(i), F: 1}, it.Next()) + + if i > 0 { + require.Equal(t, promql.FPoint{T: int64(i), F: 1}, it.Prev()) + it.advance() + } + i++ + } + require.False(t, it.HasNext()) +} + +func TestFPointRingBufferSeek(t *testing.T) { + floats := NewFPointRingBuffer(&limiter.MemoryConsumptionTracker{}) + require.NoError(t, floats.Use([]promql.FPoint{{T: 0, F: 1}, {T: 1, F: 1}, {T: 2, F: 1}, {T: 4, F: 1}, {T: 5, F: 1}, {T: 6, F: 1}, {T: 8, F: 1}, {T: 9, F: 1}})) + it := floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Seek(-1)) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Next()) + + it = floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 0, F: 1}, it.Seek(0)) + require.Equal(t, promql.FPoint{T: 1, F: 1}, it.Next()) + + it = floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 2, F: 1}, it.Seek(2)) + require.Equal(t, promql.FPoint{T: 4, F: 1}, it.Next()) + + it = floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 2, F: 1}, it.Seek(3)) + require.Equal(t, promql.FPoint{T: 4, F: 1}, it.Next()) + + it = floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 8, F: 1}, it.Seek(8)) + require.Equal(t, promql.FPoint{T: 9, F: 1}, it.Next()) + + it = floats.ViewAll(nil).Iterator(nil) + require.Equal(t, promql.FPoint{T: 9, F: 1}, it.Seek(9)) + require.False(t, it.HasNext()) +} + +func TestFPointRingBufferCopyRemainingPoints(t *testing.T) { + floats := NewFPointRingBuffer(&limiter.MemoryConsumptionTracker{}) + require.NoError(t, floats.Use([]promql.FPoint{{T: 0, F: 1}, {T: 1, F: 1}, {T: 2, F: 1}, {T: 4, F: 1}, {T: 5, F: 1}, {T: 6, F: 1}, {T: 8, F: 1}, {T: 10, F: 1}})) + it := floats.ViewAll(nil).Iterator(nil) + buff := make([]promql.FPoint, 0, it.view.Count()) + buff = 
it.CopyRemainingPointsTo(-1, buff) + last := it.At() + + require.Equal(t, 0, len(buff)) + require.Equal(t, promql.FPoint{T: 0, F: 1}, last) + + it = floats.ViewAll(nil).Iterator(nil) + buff = it.CopyRemainingPointsTo(0, buff) + last = it.At() + require.Equal(t, 1, len(buff)) + require.Equal(t, promql.FPoint{T: 0, F: 1}, last) + + buff = buff[:0] + it = floats.ViewAll(nil).Iterator(nil) + buff = it.CopyRemainingPointsTo(1, buff) + last = it.At() + require.Equal(t, 2, len(buff)) + require.Equal(t, promql.FPoint{T: 1, F: 1}, last) + + buff = buff[:0] + it = floats.ViewAll(nil).Iterator(nil) + buff = it.CopyRemainingPointsTo(3, buff) + last = it.At() + require.Equal(t, 3, len(buff)) + require.Equal(t, promql.FPoint{T: 4, F: 1}, last) + + buff = buff[:0] + it = floats.ViewAll(nil).Iterator(nil) + buff = it.CopyRemainingPointsTo(4, buff) + last = it.At() + require.Equal(t, 4, len(buff)) + require.Equal(t, promql.FPoint{T: 4, F: 1}, last) + + buff = buff[:0] + it = floats.ViewAll(nil).Iterator(nil) + buff = it.CopyRemainingPointsTo(8, buff) + last = it.At() + require.Equal(t, 7, len(buff)) + require.Equal(t, promql.FPoint{T: 8, F: 1}, last) + + buff = buff[:0] + it = floats.ViewAll(nil).Iterator(nil) + buff = it.CopyRemainingPointsTo(9, buff) + last = it.At() + require.Equal(t, 7, len(buff)) + require.Equal(t, promql.FPoint{T: 10, F: 1}, last) +} diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 822a2fb96e0..35822fa48dd 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -224,6 +224,7 @@ type Limits struct { BlockedRequests BlockedRequestsConfig `yaml:"blocked_requests,omitempty" json:"blocked_requests,omitempty" doc:"nocli|description=List of HTTP requests to block." 
category:"experimental"` AlignQueriesWithStep bool `yaml:"align_queries_with_step" json:"align_queries_with_step"` EnabledPromQLExperimentalFunctions flagext.StringSliceCSV `yaml:"enabled_promql_experimental_functions" json:"enabled_promql_experimental_functions"` + EnabledPromQLExtendedRangeSelectors flagext.StringSliceCSV `yaml:"enabled_promql_extended_range_selectors" json:"enabled_promql_extended_range_selectors"` Prom2RangeCompat bool `yaml:"prom2_range_compat" json:"prom2_range_compat" category:"experimental"` SubquerySpinOffEnabled bool `yaml:"subquery_spin_off_enabled" json:"subquery_spin_off_enabled" category:"experimental"` LabelsQueryOptimizerEnabled bool `yaml:"labels_query_optimizer_enabled" json:"labels_query_optimizer_enabled" category:"advanced"` @@ -478,6 +479,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxQueryExpressionSizeBytes, MaxQueryExpressionSizeBytesFlag, 0, "Max size of the raw query, in bytes. This limit is enforced by the query-frontend for instant, range and remote read queries. 0 to not apply a limit to the size of the query.") f.BoolVar(&l.AlignQueriesWithStep, alignQueriesWithStepFlag, false, "Mutate incoming queries to align their start and end with their step to improve result caching.") f.Var(&l.EnabledPromQLExperimentalFunctions, "query-frontend.enabled-promql-experimental-functions", "Enable certain experimental PromQL functions, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental functions are disabled. Set to 'all' to enable all experimental functions.") + f.Var(&l.EnabledPromQLExtendedRangeSelectors, "query-frontend.enabled-promql-extended-range-selectors", "Enable certain experimental PromQL extended range selector modifiers, which are subject to being changed or removed at any time, on a per-tenant basis. Defaults to empty which means all experimental modifiers are disabled. 
Set to 'all' to enable all experimental modifiers.") f.BoolVar(&l.Prom2RangeCompat, "query-frontend.prom2-range-compat", false, "Rewrite queries using the same range selector and resolution [X:X] which don't work in Prometheus 3.0 to a nearly identical form that works with Prometheus 3.0 semantics") f.BoolVar(&l.SubquerySpinOffEnabled, "query-frontend.subquery-spin-off-enabled", false, "Enable spinning off subqueries from instant queries as range queries to optimize their performance.") f.BoolVar(&l.LabelsQueryOptimizerEnabled, "query-frontend.labels-query-optimizer-enabled", true, "Enable labels query optimizations. When enabled, the query-frontend may rewrite labels queries to improve their performance.") @@ -1448,6 +1450,10 @@ func (o *Overrides) EnabledPromQLExperimentalFunctions(userID string) []string { return o.getOverridesForUser(userID).EnabledPromQLExperimentalFunctions } +func (o *Overrides) EnabledPromQLExtendedRangeSelectors(userID string) []string { + return o.getOverridesForUser(userID).EnabledPromQLExtendedRangeSelectors +} + func (o *Overrides) Prom2RangeCompat(userID string) bool { return o.getOverridesForUser(userID).Prom2RangeCompat }