
Commit 73caefc

update README.md
1 parent 25c5702 commit 73caefc

3 files changed: 35 additions, 17 deletions


README.md

Lines changed: 28 additions & 10 deletions
@@ -15,25 +15,43 @@ These benchmarks can be executed on a vanilla Kubernetes or OpenShift cluster. I
 * Non-OpenShift Deployments Only; Optional: [Cadvisor](https://github.com/google/cadvisor)

 * Notes
-  * Clone git repositories into sibling directories to the `loki-benchmarks` one.
-  * Recommended cluster size: `m4.16xlarge`
+  * Clone git repositories into sibling directories to the `loki-benchmarks` one.
+  * Recommended cluster size: `m4.16xlarge`

 ## Configuring Tests

 To change the testing configuration, see the files in the [config](./config) directory.

-Use the `scenarios/benchmarks.yaml` file to add, modify, or remove configurations. Modify the `generator.yaml`, `metrics.yaml`, or `querier.yaml` in the prefered deployment method directory to change these soruces.
+Different scenarios can be customized under `config/benchmarks/scenarios/benchmarks`. Currently the benchmarks support two testing scenarios:
+
+* Ingestion path scenarios ([supported configuration](https://github.com/observatorium/loki-benchmarks/blob/1a0a9e8f6190475b6c1bfacb5a31a88bd76cbb36/internal/config/config.go#L76-L81)): this test generates a configurable volume of logs over a 30-minute window that is meant to represent a full day of log ingestion.
+* Query path scenarios ([supported configuration](https://github.com/observatorium/loki-benchmarks/blob/1a0a9e8f6190475b6c1bfacb5a31a88bd76cbb36/internal/config/config.go#L102-L108)): this test generates the amount of data to be queried first, and only then starts running the queries.

 ## Running Benchmarks

-Use the `make run-rhobs-benchmarks` or `make run-operator-benchmarks` to execute the benchmark program with the RHOBS or operator deployment styles on OpenShift respectively. Upon successful completion, a JSON and XML file will be created in the `reports/date+time` directory with the results of the tests.
+### Prerequisites

-## Troubleshooting
+The `run-operator-benchmarks` target expects the following two environment variables to be set: `LOKI_OPERATOR_REGISTRY` and `LOKI_STORAGE_BUCKET`.
+For example:
+
+```shell
+export LOKI_OPERATOR_REGISTRY=jmarcal
+export LOKI_STORAGE_BUCKET=jmarcal-loki-benchmark-storage
+```

-During benchmark execution, use [hack/scripts/ocp-deploy-grafana.sh](hack/scripts/ocp-deploy-grafana.sh) to deploy grafna and connect to Loki as a datasource:
-- Use a web browser to access grafana UI. The URL, username and password are printed by the script
-- In the UI, under settings -> data-sources hit `Save & test` to verify that Loki data-source is connected and that there are no errors
-- In explore tab change the data-source to `Loki` and use `{client="promtail"}` query to visualize log lines
-- Use additional queries such as `rate({client="promtail"}[1m])` to verify the behaviour of Loki and the benchmark
+### Steps
+
+1. Use `make run-rhobs-benchmarks` or `make run-operator-benchmarks` to execute the benchmark program with the RHOBS or operator deployment style on OpenShift, respectively.
+   Both commands run all the scenarios under `config/benchmarks/scenarios/benchmarks`.
+2. Upon successful completion of each scenario, a JSON and an XML file with the results of the tests are created in the `reports/date+time/scenario_name` directory.
+3. Once all scenarios have been run, use `python3 hack/scripts/generate_report.py $PATH_TO_SCENARIO_1 $PATH_TO_SCENARIO_2 $PATH_TO_SCENARIO_...` to compile a report that helps compare the different scenarios.
+4. To share the report on Google Docs, run `python3 hack/scripts/create-gdoc.py $PATH_TO_THE_REPORT`; this generates a docx file that can then be shared.
+
+## Troubleshooting

+During benchmark execution, use [hack/scripts/ocp-deploy-grafana.sh](hack/scripts/ocp-deploy-grafana.sh) to deploy Grafana and connect to Loki as a datasource:

+* Use a web browser to access the Grafana UI. The URL, username, and password are printed by the script.
+* In the UI, under Settings -> Data sources, hit `Save & test` to verify that the Loki data source is connected and that there are no errors.
+* In the Explore tab, change the data source to `Loki` and use the `{client="promtail"}` query to visualize log lines.
+* Use additional queries such as `rate({client="promtail"}[1m])` to verify the behaviour of Loki and the benchmark.
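
Taken together, the updated README describes the following end-to-end flow. The sketch below simply strings the documented commands together; the exported values are the diff's own examples, and the report directory names are hypothetical placeholders for the `reports/date+time/scenario_name` layout:

```shell
# Prerequisites for the operator deployment style (example values from the diff)
export LOKI_OPERATOR_REGISTRY=jmarcal
export LOKI_STORAGE_BUCKET=jmarcal-loki-benchmark-storage

# Run every scenario under config/benchmarks/scenarios/benchmarks
make run-operator-benchmarks

# Compile a comparison report from the per-scenario results
# (the reports/... paths are hypothetical examples, not real output)
python3 hack/scripts/generate_report.py \
    reports/2024-01-01_12-00/ingestion-scenario \
    reports/2024-01-01_12-00/query-scenario

# Convert the compiled report to a shareable docx
python3 hack/scripts/create-gdoc.py reports/2024-01-01_12-00/report
```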

internal/metrics/client.go

Lines changed: 3 additions & 3 deletions
@@ -252,22 +252,22 @@ func (c *Client) measureCommonRequestMetrics(
 	var name, code, badCode, requestRateName, badRequestRateName string

 	if method == GRPCMethod {
-		name = fmt.Sprintf("%s successful GRPC %s", job, route)
+		name = fmt.Sprintf("%s successful GRPC %s", annotation, route)
 		code = "success"
 		requestRateName = name
 		if pathRoutes == GRPCReadPathRoutes {
 			requestRateName = "successful GRPC reads"
 		}
 	} else {
-		name = fmt.Sprintf("%s 2xx %s", job, route)
+		name = fmt.Sprintf("%s 2xx %s", annotation, route)
 		code = "2.*"
 		requestRateName = name
 		if pathRoutes == HTTPReadPathRoutes {
 			requestRateName = "2xx reads"
 		}

 		badCode = "5.*"
-		badRequestRateName = fmt.Sprintf("%s 5xx %s", job, route)
+		badRequestRateName = fmt.Sprintf("%s 5xx %s", annotation, route)
 		if pathRoutes == HTTPReadPathRoutes {
 			badRequestRateName = "5xx reads"
 		}

internal/metrics/resources.go

Lines changed: 4 additions & 4 deletions
@@ -9,7 +9,7 @@ import (

 func ContainerCPU(job string, duration model.Duration, annotation gmeasure.Annotation) Measurement {
 	return Measurement{
-		Name: fmt.Sprintf("%s Sum of Container CPU Usage", annotation),
+		Name: fmt.Sprintf("%s sum of Container CPU Usage", annotation),
 		Query: fmt.Sprintf(
 			`sum(avg_over_time(pod:container_cpu_usage:sum{pod=~".*%s.*"}[%s])) * %d`,
 			job, duration, CoresToMillicores,

@@ -21,7 +21,7 @@ func ContainerCPU(job string, duration model.Duration, annotation gmeasure.Annot

 func ContainerMemoryWorkingSetBytes(job string, duration model.Duration, annotation gmeasure.Annotation) Measurement {
 	return Measurement{
-		Name: fmt.Sprintf("%s Sum of Container WorkingSet Memory", annotation),
+		Name: fmt.Sprintf("%s sum of Container WorkingSet Memory", annotation),
 		Query: fmt.Sprintf(
 			`sum(avg_over_time(container_memory_working_set_bytes{pod=~".*%s.*", container=""}[%s]) / %d)`,
 			job, duration, BytesToGigabytesMultiplier,

@@ -33,7 +33,7 @@ func ContainerGoMemstatsHeapInuse(job string, _ model.Duration, annotat

 func ContainerGoMemstatsHeapInuse(job string, _ model.Duration, annotation gmeasure.Annotation) Measurement {
 	return Measurement{
-		Name: fmt.Sprintf("%s Sum of Container Go Memstats Heap Inuse", annotation),
+		Name: fmt.Sprintf("%s sum of Container Go Memstats Heap Inuse", annotation),
 		Query: fmt.Sprintf(
 			`sum(go_memstats_heap_inuse_bytes{pod=~".*%s.*"}) / %d`,
 			job, BytesToGigabytesMultiplier,

@@ -45,7 +45,7 @@ func ContainerGoMemstatsHeapInuse(job string, _ model.Duration, annotation gmeas

 func PersistentVolumeUsedBytes(job string, duration model.Duration, annotation gmeasure.Annotation) Measurement {
 	return Measurement{
-		Name: fmt.Sprintf("%s Sum of Persistent Volume Used Bytes", annotation),
+		Name: fmt.Sprintf("%s sum of Persistent Volume Used Bytes", annotation),
 		Query: fmt.Sprintf(
 			`sum(avg_over_time(kubelet_volume_stats_used_bytes{persistentvolumeclaim=~".*%s.*"}[%s]) / %d)`,
 			job, duration, BytesToGigabytesMultiplier,
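
For reference, the PromQL in these measurements can also be evaluated by hand against the cluster's Prometheus while a benchmark is running. A minimal sketch using the standard Prometheus HTTP API, assuming Prometheus is reachable at `$PROMETHEUS_URL`, that the benchmarked job matches `querier` pods, and that `CoresToMillicores` is 1000; none of these values come from this commit:

```shell
# Manually evaluate the ContainerCPU measurement's query.
# $PROMETHEUS_URL, the "querier" pod filter, the 30m window, and the 1000
# multiplier are illustrative assumptions, not values from this commit.
curl -sG "$PROMETHEUS_URL/api/v1/query" \
    --data-urlencode 'query=sum(avg_over_time(pod:container_cpu_usage:sum{pod=~".*querier.*"}[30m])) * 1000'
```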
