diff --git a/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss b/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss
index 5299425cb..3d0ef869d 100644
--- a/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss
+++ b/nezha-fronted/src/assets/css/components/page/dashboard/explore/explore.scss
@@ -256,7 +256,7 @@
 }
 .doc-content > h1 {
   color: #e6522c;
-  font-size: 22px;
+  font-size: 24px;
 }
 
 .doc-content > h2 {
@@ -343,6 +343,7 @@ article {
 /* external reference styles end */
 
 .explore .introduce-view .title-heard.info-room{
+  position: relative;
   .logs-content {
     font-size: 16px;
     ul li{
@@ -356,6 +357,10 @@ article {
       padding: 5px;
       background-color: $--background-color-empty;
     }
+    .log-link:hover{
+      cursor: pointer;
+      border-bottom: 1px solid #3C92F1;
+    }
     .fillbox{
       display: block;
       line-height: 25px;
@@ -368,7 +373,7 @@ article {
   }
   > h1,.page-header {
     color: #e6522c;
-    font-size: 22px;
+    font-size: 24px;
     font-weight: 600;
     text-transform: uppercase;
     margin-top: 15px;
@@ -378,6 +383,20 @@ article {
       text-decoration: none;
     }
   }
+  > h1,.page-header-one {
+    color: #e6522c;
+    font-size: 22px;
+    font-weight: 600;
+    margin-top: 15px;
+    text-transform: none;
+  }
+  > h1,.page-header-two {
+    color: #e6522c;
+    font-size: 20px;
+    font-weight: 600;
+    margin-top: 15px;
+    text-transform: none;
+  }
   .title-heard__divider{
     margin: 5px 0 5px 0;
   }
@@ -416,4 +435,20 @@ article {
       }
     }
   }
+  .catalog{
+    position: absolute;
+    top: 75px;
+    right: 25px;
+    background-color: $--background-color-empty;
+    padding: 15px 10px;
+    ul li{
+      margin: 3px 14px 3px 24px;
+      list-style: circle;
+      color: #3C92F1;
+    }
+    span:hover{
+      cursor: pointer;
+      border-bottom: 1px solid #3C92F1;
+    }
+  }
 }
diff --git a/nezha-fronted/src/components/chart/chart/chartBubble.vue b/nezha-fronted/src/components/chart/chart/chartBubble.vue
index 1ed1f5abe..7fd117587 100644
--- a/nezha-fronted/src/components/chart/chart/chartBubble.vue
+++ b/nezha-fronted/src/components/chart/chart/chartBubble.vue
@@ -110,6 +110,9 @@ export default {
       this.$nextTick(() => {
         d3.select(`#bubble-svg-${this.chartId}`).selectAll('g').remove() // clear the plotting area
         const svg = document.getElementById(`bubble-svg-${this.chartId}`)
+        if (!svg) {
+          return false
+        }
         const width = svg.getBoundingClientRect().width
         const height = svg.getBoundingClientRect().height
         // define the layout
diff --git a/nezha-fronted/src/components/chart/chart/chartSankey.vue b/nezha-fronted/src/components/chart/chart/chartSankey.vue
index 85b6f0f71..05cc55b72 100644
--- a/nezha-fronted/src/components/chart/chart/chartSankey.vue
+++ b/nezha-fronted/src/components/chart/chart/chartSankey.vue
@@ -141,8 +141,11 @@ export default {
 
       // get the svg width and height, initialize the canvas
       const svgDom = document.getElementById(`sankey-svg-${this.chartId}`)
-      const width = svgDom && svgDom.getBoundingClientRect().width
-      const height = svgDom && svgDom.getBoundingClientRect().height
+      if (!svgDom) {
+        return false
+      }
+      const width = svgDom.getBoundingClientRect().width
+      const height = svgDom.getBoundingClientRect().height
       const margin1 = 100
       const margin2 = 50
       const svg = d3.select(`#sankey-svg-${this.chartId}`)
diff --git a/nezha-fronted/src/components/common/js/validate.js b/nezha-fronted/src/components/common/js/validate.js
index 7d5c2cf50..b9fe266ef 100644
--- a/nezha-fronted/src/components/common/js/validate.js
+++ b/nezha-fronted/src/components/common/js/validate.js
@@ -214,7 +214,7 @@ export function sysObjectIdInput (rule, value, callback) {
     if (sysId.test(value)) {
       callback()
     } else {
-      callback(new Error(vm.$t('validate.sysId')))
+      callback(new Error(vm.$t('overall.oid')))
     }
   }, 100)
 }
diff --git a/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue b/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue
index 0a6c82e36..d65b6fd97 100644
--- a/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue
+++ b/nezha-fronted/src/components/page/dashboard/explore/exploreItem.vue
@@ -283,23 +283,23 @@ instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="c
LogQL is Grafana Loki’s PromQL-inspired query language. Queries act as if they are a distributed grep to aggregate log sources. LogQL uses labels and operators for filtering.
There are two types of LogQL queries: log queries, which return the contents of log lines, and metric queries, which extend log queries to calculate values based on query results.
All LogQL queries contain a log stream selector.
-
Optionally, the log stream selector can be followed by a log pipeline. A log pipeline is a set of stage expressions that are chained together and applied to the selected log streams. Each expression can filter out, parse, or mutate log lines and their respective labels.
The following example shows a full log query in action:
{container="query-frontend",namespace="loki-dev"} |= "metrics.go" | logfmt | duration > 10s and throughput_mb < 500
The stream selector determines which log streams to include in a query’s results. A log stream is a unique source of log content, such as a file. A more granular log stream selector then reduces the number of searched streams to a manageable volume. This means that the labels passed to the log stream selector will affect the relative performance of the query’s execution.
The log stream selector is specified by one or more comma-separated key-value pairs. Each key is a log label and each value is that label’s value. Curly braces ({ and }) delimit the stream selector.
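For instance, a minimal stream selector (reusing labels from the full query shown earlier; the values are illustrative) selects every stream carrying both labels, and a regex matcher can widen the match:
{container="query-frontend", namespace="loki-dev"}
{namespace="loki-dev", container=~"query-frontend|querier"}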
A log pipeline can be appended to a log stream selector to further process and filter log streams. It is composed of a set of expressions. Each expression is executed in left to right sequence for each log line. If an expression filters out a log line, the pipeline will stop processing the current log line and start processing the next log line.
Some expressions can mutate the log content and respective labels, which will then be available for further filtering and processing in subsequent expressions. An example that mutates is the expression
| line_format "{{.status_code}}"
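In context (a sketch that reuses the stream from the first example; status_code and duration are assumed to be labels extracted by logfmt), the reformatted line is what any later stage sees, so a subsequent line filter acts on the new content:
{container="query-frontend", namespace="loki-dev"} | logfmt | line_format "{{.status_code}} {{.duration}}" |= "200"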
Log pipeline expressions fall into one of three categories:
{instance=~"kafka-[23]",name="kafka"} != "kafka.server:type=ReplicaManager"
Keep log lines that contain a substring that starts with tsdb-ops and ends with io:2003. A complete query with a regular expression:
+Keep log lines that contain a substring that starts with tsdb-ops and ends with io:2003. A complete query with a regular expression:
{name="kafka"} |~ "tsdb-ops.*io:2003"
Keep log lines that contain a substring that starts with error=, and is followed by 1 or more word characters. A complete query with a regular expression:
+Keep log lines that contain a substring that starts with error=, and is followed by 1 or more word characters. A complete query with a regular expression:
{name="cassandra"} |~ `error=\w+`
The String type works exactly like the Prometheus label matchers used in log stream selectors. This means you can use the same operations (=, !=, =~, !~).
The string type is the only one that can filter out a log line with a label __error__.
Using Duration, Number and Bytes will convert the label value prior to comparison and support the following comparators:
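For instance (a sketch reusing label names from the earlier examples), duration, number and string comparisons can be combined in a single label filter stage:
{container="query-frontend", namespace="loki-dev"} | logfmt | duration > 10s and throughput_mb < 500 and status_code != "200"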
Parser expression can parse and extract labels from the log content. Those extracted labels can then be used for filtering using label filter expressions or for metric aggregations.
+Parser expression can parse and extract labels from the log content. Those extracted labels can then be used for filtering using label filter expressions or for metric aggregations.
Extracted label keys are automatically sanitized by all parsers to follow the Prometheus metric name convention. (They can only contain ASCII letters and digits, as well as underscores and colons; they cannot start with a digit.)
For instance, the pipeline | json will produce the following mapping:
{ "a.b": {c: "d"}, e: "f" }
->
{a_b_c="d", e="f"}
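By analogy (this mapping is a sketch, not taken from the examples above), the logfmt parser extracts every key-value pair of a logfmt-formatted line as a label:
level=info duration=650.22401ms status=200
->
{level="info", duration="650.22401ms", status="200"}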
In case of errors, for instance if the line is not in the expected format, the log line won’t be filtered but instead will get a new __error__ label added.
If an extracted label key name already exists in the original log stream, the extracted label key will be suffixed with the _extracted keyword to make the distinction between the two labels. You can forcefully override the original label using a label formatter expression. However if an extracted key appears twice, only the latest label value will be kept.
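As an illustration (the app label and the json parser are assumed here, and this is only a sketch of the override), a stream that already carries app would end up with both app and app_extracted after parsing; the extracted value can then be promoted over the original:
{namespace="loki-dev"} | json | label_format app=app_extracted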
Loki supports JSON, logfmt, pattern, regexp and unpack parsers.
-It’s easier to use the predefined parsers json and logfmt when you can. If you can’t, the pattern and regexp parsers can be used for log lines with an unusual structure. The pattern parser is easier and faster to write; it also outperforms the regexp parser. Multiple parsers can be used by a single log pipeline. This is useful for parsing complex logs. There are examples in Multiple parsers.
If an extracted label key name already exists in the original log stream, the extracted label key will be suffixed with the _extracted keyword to make the distinction between the two labels. You can forcefully override the original label using a label formatter expression. However if an extracted key appears twice, only the latest label value will be kept.
Loki supports JSON, logfmt, pattern, regexp and unpack parsers.
+It’s easier to use the predefined parsers json and logfmt when you can. If you can’t, the pattern and regexp parsers can be used for log lines with an unusual structure. The pattern parser is easier and faster to write; it also outperforms the regexp parser. Multiple parsers can be used by a single log pipeline. This is useful for parsing complex logs. There are examples in Multiple parsers.
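As a sketch of the pattern parser (the log format and the capture names are assumed, not taken from this document), unnamed captures are written as <_> while named captures become extracted labels:
{container="ingress-nginx"} | pattern `<ip> - - <_> "<method> <uri> <_>" <status> <size>`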
Filtering should be done first using label matchers, then line filters (when possible) and finally using label filters. The following query demonstrates this.
{cluster="ops-tools1", namespace="loki-dev", job="loki-dev/query-frontend"} |= "metrics.go" !="out of order" | logfmt | duration > 30s or status_code!="200"
The following query shows how you can reformat a log line to make it easier to read on screen.
{cluster="ops-tools1", name="querier", namespace="loki-dev"}
|= "metrics.go" != "loki-canary"
| logfmt
| query != ""
- | label_format query="{{ Replace .query \"\\n\" \"\" -1 }}"
- | line_format "{{ .ts}}\t{{.duration}}\ttraceID = {{.traceID}}\t{{ printf \"%-100.100s\" .query }} "
+ | label_format query="{{ Replace .query \"\\n\" \"\" -1 }}"
+ | line_format "{{ .ts}}\t{{.duration}}\ttraceID = {{.traceID}}\t{{ printf \"%-100.100s\" .query }} "
Label formatting is used to sanitize the query while the line format reduces the amount of information and creates a tabular output.
For these given log lines:
level=info ts=2020-10-23T20:32:18.094668233Z caller=metrics.go:81 org_id=29 traceID=1980d41501b57b68 latency=fast query="{cluster=\"ops-tools1\", job=\"loki-ops/query-frontend\"} |= \"query_range\"" query_type=filter range_type=range length=15m0s step=7s duration=650.22401ms status=200 throughput_mb=1.529717 total_bytes_mb=0.994659
@@ -689,7 +689,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Multiple parsers
+ Metric queries
Metric queries extend log queries by applying a function to log query results. This powerful feature creates metrics from logs.
Metric queries can be used to calculate the rate of error messages or the top N log sources with the greatest quantity of logs over the last 3 hours.
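For example (a sketch; the error filter string and the grouping label are assumed), the per-container rate of error lines over the last five minutes could be written as:
sum by (container) (rate({namespace="loki-dev"} |= "error" [5m]))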
@@ -698,7 +698,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Range Vector aggregation
+ Range Vector aggregation
LogQL shares the range vector concept of Prometheus. In Grafana Loki, the selected range of samples is a range of selected log or label values.
The aggregation is applied over a time duration. Loki defines Time Durations with the same syntax as Prometheus.
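A minimal sketch (the stream labels are reused from earlier examples): count the log lines of a stream over the last five minutes.
count_over_time({namespace="loki-dev", container="query-frontend"}[5m])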
@@ -707,7 +707,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Log range aggregations
+ Log range aggregations
A log range aggregation is a query followed by a duration. A function is applied to aggregate the query over the duration. The duration can be placed after the log stream selector or at the end of the log pipeline.
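Both placements might look like this (a sketch; the filter string is assumed), first with the duration directly after the selector and then at the end of a pipeline:
count_over_time({namespace="loki-dev"}[5m])
rate({namespace="loki-dev"} |= "metrics.go" | logfmt | duration > 10s [5m])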
The functions:
@@ -733,7 +733,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Unwrapped range aggregations
+ Unwrapped range aggregations
Unwrapped ranges use extracted labels as sample values instead of log lines. However, to select which label will be used within the aggregation, the log query must end with an unwrap expression and optionally a label filter expression to discard errors.
The unwrap expression is noted | unwrap label_identifier where the label identifier is the label name to use for extracting sample values.
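A sketch of a full unwrapped range aggregation (throughput_mb is assumed to be a numeric label extracted by logfmt, as in the log lines shown earlier; the error filter discards lines that failed to parse):
avg_over_time({namespace="loki-dev", container="query-frontend"} |= "metrics.go" | logfmt | __error__="" | unwrap throughput_mb [1m])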
@@ -766,7 +766,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Unwrapped examples
+ Unwrapped examples
quantile_over_time(0.99,
{cluster="ops-tools1",container="ingress-nginx"}
@@ -786,7 +786,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Built-in aggregation operators
+ Built-in aggregation operators
Like PromQL, LogQL supports a subset of built-in aggregation operators that can be used to aggregate the elements of a single vector, resulting in a new vector of fewer elements but with aggregated values:
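As a sketch (the operator and the grouping label are illustrative), such an operator wraps a metric query and optionally groups the result:
max by (container) (count_over_time({namespace="loki-dev"}[5m]))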
@@ -808,7 +808,7 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
- Vector aggregation examples
+ Vector aggregation examples
Get the top 10 applications by the highest log throughput:
topk(10,sum(rate({region="us-east1"}[5m])) by (name))
@@ -819,6 +819,56 @@ level=info ts=2020-10-23T20:32:18.068866235Z caller=metrics.go:81 org_id=29 trac
+
+
+ -
+ Log queries
+
+ - Log stream selector
+ -
+ Log pipeline
+
+ - Line filter expression
+ - Label filter expression
+ - Parser expression
+ - JSON
+ - logfmt
+ - Pattern
+ - Regular expression
+ - unpack
+ - Line format expression
+ - Labels format expression
+
+
+ -
+ Log queries examples
+
+ - Multiple filtering
+ - Multiple parsers
+ - Formatting
+
+
+
+
+ -
+ Metric queries
+
+ -
+ Range Vector aggregation
+
+ - Log range aggregations
+ - Unwrapped range aggregations
+ - Unwrapped examples
+
+
+ -
+ Built-in aggregation operators
+
- Vector aggregation examples
+
+
+
+
+
diff --git a/nezha-fronted/src/entrance/app/App.vue b/nezha-fronted/src/entrance/app/App.vue
index e995500be..b9c58c69c 100644
--- a/nezha-fronted/src/entrance/app/App.vue
+++ b/nezha-fronted/src/entrance/app/App.vue
@@ -1,7 +1,7 @@
-
+
diff --git a/nezha-fronted/src/http.js b/nezha-fronted/src/http.js
index ee835a5b1..384e901de 100644
--- a/nezha-fronted/src/http.js
+++ b/nezha-fronted/src/http.js
@@ -1,5 +1,7 @@
-import { getUUID } from './components/common/js/common'
+
+import router from './router'
import axios from 'axios'
+import { getUUID } from './components/common/js/common'
const CancelToken = axios.CancelToken // declare CancelToken
export const requestsArr = []
// remove completed request instances to avoid holding on to memory
@@ -83,6 +85,7 @@ axios.interceptors.response.use(
window.location.href = '/'
} else if (response.status === 200) {
if (accountErrorCode.indexOf(response.data.code) !== -1 && noJumpPath.indexOf(window.location.hash) == -1) {
+ sessionStorage.setItem('nz-previous-page', router.currentRoute.fullPath)
window.location.href = '/'
}
} else {
diff --git a/nezha-fronted/src/permission.js b/nezha-fronted/src/permission.js
index 58c61a525..7fe83cab8 100644
--- a/nezha-fronted/src/permission.js
+++ b/nezha-fronted/src/permission.js
@@ -149,6 +149,7 @@ router.beforeEach((to, from, next) => {
resolve()
} else {
localStorage.removeItem('nz-token')
+ sessionStorage.setItem('nz-previous-page', to.fullPath)
next({ path: '/login' })
}
})
@@ -179,6 +180,7 @@ router.beforeEach((to, from, next) => {
if (loginWhiteList.indexOf(to.path) !== -1) {
next()
} else {
+ sessionStorage.setItem('nz-previous-page', to.fullPath)
next({ path: '/login' })
}
}
diff --git a/nezha-fronted/src/store/user.js b/nezha-fronted/src/store/user.js
index 023b6cc3b..fe4ecfd02 100644
--- a/nezha-fronted/src/store/user.js
+++ b/nezha-fronted/src/store/user.js
@@ -142,12 +142,22 @@ const user = {
}
})
}
- router.push({
- path: path[0],
- query: {
- t: +new Date()
- }
- })
+ // after a successful login, jump back to the original page
+ if (sessionStorage.getItem('nz-previous-page')) {
+ const route = sessionStorage.getItem('nz-previous-page')
+ router.push({
+ path: route
+ }).then(() => {
+ sessionStorage.removeItem('nz-previous-page')
+ })
+ } else {
+ router.push({
+ path: path[0],
+ query: {
+ t: +new Date()
+ }
+ })
+ }
})
get('/sys/user/profile').then(response => {
if (response.code === 200) {