diff --git a/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss b/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss index fba99f49b..f56dc3321 100644 --- a/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss +++ b/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss @@ -11,6 +11,8 @@ background-color: $--background-color-empty; color: $--color-text-primary; padding: 15px; + overflow-x: scroll; + padding-bottom: 0 !important; } pre code { @@ -205,167 +207,46 @@ border-color: $--button-icon-border-color; } } - .binary-operator-precedence{ - ul li{ + .binary-operator-precedence { + ul li { list-style: decimal !important; } } } -/*外部引用 样式start*/ -// .doc-content { -// font-size: 16px; -// } - -// .doc-content p, -// .doc-content.ul, -// .doc-content .alert { -// margin: 15px 0 15px 0; -// line-height: 1.5; -// color: $--color-text-primary; -// } - -// .doc-content .content-divider { -// height: 1px; -// width: 100%; -// border-bottom: 2px solid $--explore-border-color-bottom; -// margin: 5px 0; -// } - -// .doc-content > h1 { -// color: #e6522c; -// font-size: 30px; -// text-transform: uppercase; -// } - -// .doc-content > h1 a { -// color: #000 !important; -// } - -// .doc-content.blog > h1 { -// text-transform: none; -// } - -// .doc-content.blog .sponsor-logos > a > img { -// width: 250px; -// display: inline-block !important; -// margin: 15px 55px; -// } -// .doc-content > h1 { -// color: #e6522c; -// font-size: 24px; -// } - -// .doc-content > h2 { -// color: #e6522c; -// font-size: 18px; -// } - -// .doc-content > h2 code { -// color: #e6522c; -// background: none; -// } - -// .doc-content > h3 { -// font-size: 20px; -// font-weight: bold; -// } - -// .doc-content > h4 { -// font-weight: bold; -// font-size: 18px; -// margin-top: 20px; -// } - -// .doc-content a.header-anchor { -// padding-left: 15px; -// color: gray; -// text-decoration: none; -// } - -// 
.doc-content img { -// width: 90%; -// margin-left: auto; -// margin-right: auto; -// display: block; -// } - -// .doc-content img.orig-size { -// width: auto; -// margin-left: 0; -// } - -// .doc-content .open-source-notice { -// color: $--color-text-regular; -// background-color: $--background-color-base; -// text-align: center; -// padding: 0.8em; -// margin-top: 1.5em; -// } - -// .toc { -// padding: 1em; -// background-color: $--background-color-base; -// } -// .toc-right { -// float: right; -// width: 40%; -// margin: 0 0 0.5em 0.5em; -// } - -// .toc ul { -// padding: 0 0 0 1.5em; -// margin: 0; -// } - -// .toc a code { -// color: #337ab7; -// background-color: transparent; -// } code { color: $--color-text-primary; } -aside { - color: $--color-text-secondary; - padding-bottom: 8px; - border-bottom: 1px solid #aaa; -} +// aside { +// color: $--color-text-secondary; +// padding-bottom: 8px; +// border-bottom: 1px solid #aaa; +// } +// article { +// margin: 10px 0 60px 0; +// } -article { - margin: 10px 0 60px 0; -} -.explore .introduce-view .title-heard.info-room{ - position: relative; + +// exploreItem +.explore .introduce-view .title-heard.info-room { .logs-content { font-size: 16px; - ul li{ + ul li { margin: 10px 0 10px 24px; list-style: disc; } - p{ + p { margin: 15px 0; } - code{ + code { padding: 5px; background-color: $--background-color-empty; } - .log-link:hover{ - cursor: pointer; - border-bottom: 1px solid #3C92F1; - } - .fillbox{ - display: block; - line-height: 25px; - padding: 10px; - } - h2{ - color: #e6522c; - font-size: 18px; - font-weight: 600; - } - > h1,.page-header { + + > h1, + .page-header { color: #e6522c; font-size: 24px; font-weight: 600; @@ -377,42 +258,49 @@ article { text-decoration: none; } } - > h1,.page-header-one { + > h1, + .page-header-one { color: #e6522c; font-size: 22px; font-weight: 600; margin-top: 15px; text-transform: none; } - > h1,.page-header-two { + > h1, + .page-header-two { color: #e6522c; font-size: 20px; 
font-weight: 600; margin-top: 15px; text-transform: none; } - .title-heard__divider{ + h2 { + color: #e6522c; + font-size: 18px; + font-weight: 600; + } + .title-heard__divider { margin: 5px 0 5px 0; } - .page-header-label{ + .page-header-label { p { - width: 100%; font-size: 16px; color: $--color-text-primary; font-weight: 400; } } - .json-module{ - padding-left: 20px; + // 链接 + .log-link:hover { + cursor: pointer; + border-bottom: 1px solid #3c92f1; } - .box-overflow{ - overflow-x: scroll; + // 阴影部分 + .fillbox { + display: block; + line-height: 25px; + padding: 10px; } - pre.box-overflow{ - overflow-x: scroll; - padding-bottom: 0 !important; - } - .logfmt-module,.json-module,.pattern-module,.regular-module,.unpack-module,.line-module,.formatting-module,.unwrapped-module{ + .logfmt-module { pre { border: 0; border-left: 0; @@ -424,39 +312,38 @@ article { line-height: 25px; } } - .img-hidden{ + // 图片转文字 + .img-hidden { width: 582px; overflow: hidden; - img{ + img { filter: drop-shadow(-550px 0px $--color-text-primary); transform: translateX(570px); } } } - .catalog{ + // 目录 + .catalog { width: 360px; - position: absolute; - top: 75px; - right: 25px; + float: right; + margin-left: 8px; + margin-top: 5px; background-color: $--background-color-empty; padding: 15px 10px; - ul li{ + ul li { margin: 3px 14px 3px 24px; list-style: circle; - color: #3C92F1; + color: #3c92f1; } - ul.catalog-square>li{ + ul.catalog-square > li { list-style: square; } - ul.catalog-disc>li{ + ul.catalog-disc > li { list-style: disc; } - span:hover{ + span:hover { cursor: pointer; - border-bottom: 1px solid #3C92F1; + border-bottom: 1px solid #3c92f1; } } - .box-w-auto{ - width: calc(100% - 390px) !important; - } } diff --git a/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue b/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue index d2fb8e321..5dbfbdb22 100644 --- a/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue +++ 
b/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue @@ -203,24 +203,149 @@
Prometheus provides a functional query language called PromQL (Prometheus Query Language) that lets the user select and aggregate time series data in real time. The result of an expression can either be shown as a graph, viewed as tabular data in Prometheus's expression browser, or consumed by external systems via the HTTP API.
This document is meant as a reference. For learning, it might be easier to start with a couple of examples.
In Prometheus's expression language, an expression or sub-expression can evaluate to one of four types:
Strings may be specified as literals in single quotes, double quotes or backticks.
PromQL follows the same escaping rules as Go. In single or double quotes a backslash begins an escape sequence, which may be followed by a, b, f, n, r, t, v or \. Specific characters can be provided using octal (\nnn) or hexadecimal (\xnn, \unnnn and \Unnnnnnnn).
No escaping is processed inside backticks. Unlike Go, Prometheus does not discard newlines inside backticks.
Example:
-"this is a string" +"this is a string" 'these are unescaped: \n \\ \t' `these are not unescaped: \n ' " \t`
Scalar float values can be written as literal integer or floating-point numbers in the format (whitespace only included for better readability):
-[-+]?( +[-+]?( [0-9]*\.?[0-9]+([eE][-+]?[0-9]+)? | 0[xX][0-9a-fA-F]+ | [nN][aA][nN] @@ -270,7 +395,7 @@ NaNInstant vector selectors
-@@ -1092,7 +1217,7 @@ absent_over_time(sum(nonexistent{job="myjob"})[1h:]) instance_memory_limit_bytes - instance_memory_usage_bytes ) / 1024 / 1024+@@ -858,7 +983,7 @@ absent_over_time(sum(nonexistent{job="myjob"})[1h:])Instant vector selectors allow the selection of a set of time series and a single sample value for each at a given timestamp (instant): in the simplest form, only a metric name is specified. This results in an instant vector containing elements for all time series that have this metric name.
This example selects all time series that have the
http_requests_totalmetric name:http_requests_total@@ -286,7 +411,7 @@ NaNRegex matches are fully anchored. A match of
env=~"foo"is treated asenv=~"^foo$".For example, this selects all
-http_requests_totaltime series forstaging,testing, anddevelopmentenvironments and HTTP methods other thanGET.http_requests_total{environment=~"staging|testing|development",method!="GET"}+http_requests_total{environment=~"staging|testing|development",method!="GET"}Label matchers that match empty label values also select all time series that do not have the specific label set at all. It is possible to have multiple matchers for the same label name.
Vector selectors must either specify a name or at least one label matcher that does not match the empty string. The following expression is illegal:
@@ -552,7 +677,7 @@ method:http_requests:rate5m{method="get"} 600 method:http_requests:rate5m{method="del"} 34 method:http_requests:rate5m{method="post"} 120Example query:
-method_code:http_errors:rate5m{code="500"} / ignoring(code) method:http_requests:rate5m+method_code:http_errors:rate5m{code="500"} / ignoring(code) method:http_requests:rate5mThis returns a result vector containing the fraction of HTTP requests with status code of 500 for each method, as measured over the last 5 minutes. Without
ignoring(code)there would have been no match as the metrics do not share the same set of labels. The entries with methodsputanddelhave no match and will not show up in the result:{method="get"} 0.04 // 24 / 600 {method="post"} 0.05 // 6 / 120@@ -563,13 +688,13 @@ method:http_requests:rate5m{method="post"} 120Many-to-one and one-to-many vector matches
@@ -849,7 +974,7 @@ absent_over_time(sum(nonexistent{job="myjob"})[1h:])Many-to-one and one-to-many matchings refer to the case where each vector element on the "one"-side can match with multiple elements on the "many"-side. This has to be explicitly requested using the
-group_leftorgroup_rightmodifiers, where left/right determines which vector has the higher cardinality.<vector expr> <bin-op> ignoring(<label list>) group_left(<label list>) <vector expr> +<vector expr> <bin-op> ignoring(<label list>) group_left(<label list>) <vector expr> <vector expr> <bin-op> ignoring(<label list>) group_right(<label list>) <vector expr> <vector expr> <bin-op> on(<label list>) group_left(<label list>) <vector expr> <vector expr> <bin-op> on(<label list>) group_right(<label list>) <vector expr>The label list provided with the group modifier contains additional labels from the "one"-side to be included in the result metrics. For
ona label can only appear in one of the lists. Every time series of the result vector must be uniquely identifiable.Example query:
-method_code:http_errors:rate5m / ignoring(code) group_left method:http_requests:rate5m+method_code:http_errors:rate5m / ignoring(code) group_left method:http_requests:rate5mIn this case the left vector contains more than one entry per
methodlabel value. Thus, we indicate this usinggroup_left. The elements from the right side are now matched with multiple elements with the samemethodlabel on the left:{method="get", code="500"} 0.04 // 24 / 600 {method="get", code="404"} 0.05 // 30 / 600 @@ -792,9 +917,9 @@ absent_over_time(sum(nonexistent{job="myjob"})[1h:])Example: A histogram metric is called
http_request_duration_seconds. To calculate the 90th percentile of request durations over the last 10m, use the following expression:histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m]))The quantile is calculated for each label combination in
-http_request_duration_seconds. To aggregate, use thesum()aggregator around therate()function. Since thelelabel is required byhistogram_quantile(), it has to be included in thebyclause. The following expression aggregates the 90th percentile byjob:histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m])))+histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m])))To aggregate everything, specify only the
-lelabel:histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[10m])))+histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[10m])))The
histogram_quantile()function interpolates quantile values by assuming a linear distribution within a bucket. The highest bucket must have an upper bound of+Inf. (Otherwise,NaNis returned.) If a quantile is located in the highest bucket, the upper bound of the second highest bucket is returned. A lower limit of the lowest bucket is assumed to be 0 if the upper bound of that bucket is greater than 0. In that case, the usual linear interpolation is applied within that bucket. Otherwise, the upper bound of the lowest bucket is returned for quantiles located in the lowest bucket.If
bhas 0 observations,NaNis returned. Ifbcontains fewer than two buckets,NaNis returned. For φ < 0,-Infis returned. For φ > 1,+Infis returned. For φ =NaN,NaNis returned.For each timeseries in
v,label_join(v instant-vector, dst_label string, separator string, src_label_1 string, src_label_2 string, ...)joins all the values of all thesrc_labelsusingseparatorand returns the timeseries with the labeldst_labelcontaining the joined value. There can be any number ofsrc_labelsin this function.This example will return a vector with each time series having a
-foolabel with the valuea,b,cadded to it:label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1", "src2", "src3")+label_join(up{job="api-server",src1="a",src2="b",src3="c"}, "foo", ",", "src1", "src2", "src3")For each timeseries in
v,label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)matches the regular expressionregexagainst the value of the labelsrc_label. If it matches, the value of the labeldst_labelin the returned timeseries will be the expansion ofreplacement, together with the original labels in the input. Capturing groups in the regular expression can be referenced with$1,$2, etc. If the regular expression doesn't match then the timeseries is returned unchanged.This example will return timeseries with the values
-a:cat labelserviceandaat labelfoo:label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*")+label_replace(up{job="api-server",service="a:c"}, "foo", "$1", "service", "(.*):.*")If the same fictional cluster scheduler exposed CPU usage metrics like the following for every instance:
-instance_cpu_time_ns{app="lion", proc="web", rev="34d0f99", env="prod", job="cluster-manager"} +instance_cpu_time_ns{app="lion", proc="web", rev="34d0f99", env="prod", job="cluster-manager"} instance_cpu_time_ns{app="elephant", proc="worker", rev="34d0f99", env="prod", job="cluster-manager"} instance_cpu_time_ns{app="turtle", proc="api", rev="4d3a513", env="prod", job="cluster-manager"} instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="cluster-manager"} @@ -1104,143 +1229,66 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
LogQL is Grafana Loki’s PromQL-inspired query language. Queries act as if they are a distributed grep to aggregate log sources. LogQL uses labels and operators for filtering.
There are two types of LogQL queries:
All LogQL queries contain a log stream selector.
+All LogQL queries contain a log stream selector.
Optionally, the log stream selector can be followed by a log pipeline. A log pipeline is a set of stage expressions that are chained together and applied to the selected log streams. Each expression can filter out, parse, or mutate log lines and their respective labels.
The following example shows a full log query in action:
@@ -1263,7 +1311,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c{container="query-frontend",namespace="loki-dev"} which targets the query-frontend container in the loki-dev namespace.|= "metrics.go" | logfmt | duration > 10s and throughput_mb < 500 which will filter out log that contains the word metrics.go, then parses each log line to extract more labels and filter with them.To avoid escaping special characters you can use the+`(backtick) instead of"when quoting strings. For example`\w+`is the same as"\\w+". This is specially useful when writing a regular expression which contains multiple
backslashes that require escaping.
To avoid escaping special characters you can use the`(backtick) instead of"when quoting strings. For example`\w+`is the same as"\\w+". This is especially useful when writing a regular expression which contains multiple
backslashes that require escaping.
String type works exactly like Prometheus label matchers used in log stream selectors. This means you can use the same operations (=,!=,=~,!~).
The string type is the only one that can filter out a log line with a label __error__.
+ The string type is the only one that can filter out a log line with a label __error__.
Using Duration, Number and Bytes will convert the label value prior to comparison and support the following comparators:
== or = for equality. It will evaluate first duration >= 20ms or method="GET". To evaluate first method="GET" and size <= 20KB, make sure to use proper parentheses as shown below.
| duration >= 20ms or (method="GET" and size <= 20KB)
Label filter expressions are the only expression allowed after the unwrap expression. This is mainly to allow filtering errors from the metric extraction.+
Label filter expressions are the only expression allowed after the unwrap expression. This is mainly to allow filtering errors from the metric extraction.
Label filter expressions support matching IP addresses. See Matching IP addresses for details.
The json parser operates in two modes:
1. without parameters:
-Adding | json to your pipeline will extract all json properties as labels if the log line is a valid json document. Nested properties are flattened into label keys using the _ separator.
Note: Arrays are skipped.
For example the json parsers will extract from the following document:
@@ -1453,7 +1501,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c "response_latency_seconds" => "6.031"2. with parameters:
-Using | json label="expression", another="expression" in your pipeline will extract only the specified json fields to labels. You can specify one or more expressions in this way, the same as label_format; all expressions must be quoted.
Currently, we only support field access (my.field, my["field"]) and array access (list[0]), and any combination of these in any level of nesting (my.list[0]["field"]).
For example, | json first_server="servers[0]", ua="request.headers[\"User-Agent\"]" will extract from the following document:
The logfmt parser can be added using the | logfmt and will extract all keys and values from the logfmt formatted log line.
For example the following log line:
-at=info method=GET path=/ host=grafana.net fwd="124.133.124.161" service=8ms status=200+
at=info method=GET path=/ host=grafana.net fwd="124.133.124.161" service=8ms status=200
will get those labels extracted:
"at" => "info"
"method" => "GET"
@@ -1512,7 +1560,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
Pattern
-
+
The pattern parser allows the explicit extraction of fields from log lines by defining a pattern expression (| pattern "<pattern-expression>"). The expression matches the structure of a log line.
Consider this NGINX log line.
0.191.12.2 - - [10/Jun/2021:09:14:29 +0000] "GET /api/plugins/versioncheck HTTP/1.1" 200 2 "-" "Go-http-client/2.0" "13.76.247.102, 34.120.177.193" "TLSv1.2" "US" ""
@@ -1532,7 +1580,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
Literals can be any sequence of UTF-8 characters, including whitespace characters.
By default, a pattern expression is anchored at the start of the log line. If the expression starts with literals, then the log line must also start with the same set of literals. Use <_> at the beginning of the expression if you don’t want to anchor the expression at the start.
Consider the log line
- level=debug ts=2021-06-10T09:24:13.472094048Z caller=logging.go:66 traceID=0568b66ad2d9294c msg="POST /loki/api/v1/push (204) 16.652862ms"
+ level=debug ts=2021-06-10T09:24:13.472094048Z caller=logging.go:66 traceID=0568b66ad2d9294c msg="POST /loki/api/v1/push (204) 16.652862ms"
To match msg=", use the expression:
<_> msg="<method> <path> (<status>) <latency>"
A pattern expression is invalid if
@@ -1545,7 +1593,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
Regular expression
-
+
Unlike the logfmt and json, which implicitly extract all values and take no parameters, the regexp parser takes a single parameter | regexp "<re>" which is the regular expression using the Golang RE2 syntax.
The regular expression must contain at least one named sub-match (e.g. (?P<name>re)); each sub-match will extract a different label.
For example the parser | regexp "(?P<method>\\w+) (?P<path>[\\w|/]+) \\((?P<status>\\d+?)\\) (?P<duration>.*)" will extract from the following line:
@@ -1560,7 +1608,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
unpack
-
+
The unpack parser parses a JSON log line, unpacking all embedded labels from Promtail’s pack stage. A special property _entry will also be used to replace the original log line.
For example, using | unpack with the log line:
{
@@ -1575,7 +1623,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
Line format expression
-
+
The line format expression can rewrite the log line content by using the text/template format. It takes a single string parameter | line_format "{{.label_name}}", which is the template format. All labels are injected variables into the template and are available to use with the {{.label_name}} notation.
For example the following expression:
{container="frontend"} | logfmt | line_format "{{.query}} {{.duration}}"
@@ -1583,7 +1631,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
You can use double quoted string for the template or backticks `{{.label_name}}` to avoid the need to escape special characters.
line_format also supports math functions. Example:
If we have the following labels ip=1.1.1.1, status=200 and duration=3000(ms), we can divide the duration by 1000 to get the value in seconds.
- {container="frontend"} | logfmt | line_format "{{.ip}} {{.status}} {{div .duration 1000}}"
+ {container="frontend"} | logfmt | line_format "{{.ip}} {{.status}} {{div .duration 1000}}"
The above query will give us the line as 1.1.1.1 200 3
See template functions to learn about available functions in the template format.
@@ -1597,7 +1645,7 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
The right side can alternatively be a template string (double quoted or backtick), for example dst="{{.status}} {{.query}}", in which case the dst label value is replaced by the result of the text/template evaluation. This is the same template engine as the | line_format expression, which means labels are available as variables and you can use the same list of functions.
In both cases, if the destination label doesn’t exist, then a new one is created.
The renaming form dst=src will drop the src label after remapping it to the dst label. However, the template form will preserve the referenced labels, such that dst="{{.src}}" results in both dst and src having the same value.
- A single label name can only appear once per expression. This means | label_format foo=bar,foo="new" is not allowed but you can use two expressions for the desired effect: | label_format foo=bar | label_format foo="new"
+ A single label name can only appear once per expression. This means | label_format foo=bar,foo="new" is not allowed but you can use two expressions for the desired effect: | label_format foo=bar | label_format foo="new"
@@ -1624,9 +1672,9 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
Formatting
-
+
The following query shows how you can reformat a log line to make it easier to read on screen.
- {cluster="ops-tools1", name="querier", namespace="loki-dev"}
+ {cluster="ops-tools1", name="querier", namespace="loki-dev"}
|= "metrics.go" != "loki-canary"
| logfmt
| query != ""
@@ -1634,10 +1682,10 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
| line_format "{{ .ts}}\t{{.duration}}\ttraceID = {{.traceID}}\t{{ printf \"%-100.100s\" .query }} "
Label formatting is used to sanitize the query while the line format reduce the amount of information and creates a tabular output.
For these given log lines:
- level=info ts=2020-10-23T20:32:18.094668233Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"loki-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=650.22401ms status=200 throughput_mb=1.529717 total_bytes_mb=0.994659
+ level=info ts=2020-10-23T20:32:18.094668233Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"loki-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=650.22401ms status=200 throughput_mb=1.529717 total_bytes_mb=0.994659
level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"loki-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=624.008132ms status=200 throughput_mb=0.693449 total_bytes_mb=0.432718
The result would be:
- 2020-10-23T20:32:18.094668233Z 650.22401ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="loki-ops/query-frontend"} |= "query_range"
+ 2020-10-23T20:32:18.094668233Z 650.22401ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="loki-ops/query-frontend"} |= "query_range"
2020-10-23T20:32:18.068866235Z 624.008132ms traceID = 1980d41501b57b68 {cluster="ops-tools1", job="loki-ops/query-frontend"} |= "query_range"
@@ -1722,7 +1770,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
Unwrapped examples
-
+
quantile_over_time(0.99,
{cluster="ops-tools1",container="ingress-nginx"}
| json
@@ -1774,56 +1822,6 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
-
-
- -
- Log queries
-
- - Log stream selector
- -
- Log pipeline
-
- - Line filter expression
- - Label filter expression
- - Parser expression
- - JSON
- - logfmt
- - Pattern
- - Regular expression
- - unpack
- - Line format expression
- - Labels format expression
-
-
- -
- Log queries examples
-
- - Multiple filtering
- - Multiple parsers
- - Formatting
-
-
-
-
- -
- Metric queries
-
- -
- Range Vector aggregation
-
- - Log range aggregations
- - Unwrapped range aggregations
- - Unwrapped examples
-
-
- -
- Built-in aggregation operators
-
- Vector aggregation examples
-
-
-
-
-