logs.alloy
declare "from_worker" {
argument "targets" {
comment = "Must be a list() of targets"
}
argument "forward_to" {
comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
}
export "receiver" {
value = loki.process.parse.receiver
}
discovery.relabel "worker_logs" {
targets = argument.targets.value
// set the __path__; this is automatically translated into a "filename" label (which should be dropped or normalized)
// DO NOT delete this rule as it is needed to tail the pod logs on the node
rule {
action = "replace"
separator = "/"
source_labels = [
"__meta_kubernetes_pod_uid",
"__meta_kubernetes_pod_container_name",
]
replacement = "/var/log/pods/*$1/*.log"
target_label = "__path__"
}
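// e.g. for a pod with uid 5cafa323-a7ed-4703-9220-640d3e44a5e3 and container config-reloader (illustrative
// values), __path__ becomes /var/log/pods/*5cafa323-a7ed-4703-9220-640d3e44a5e3/config-reloader/*.log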
// set the __host__
rule {
action = "replace"
source_labels = ["__meta_kubernetes_pod_node_name"]
target_label = "__host__"
}
// as a result of kubernetes service discovery for pods, all of the pod metadata is exposed as
// __meta_kubernetes_pod_* labels, including __meta_kubernetes_pod_container_id, which identifies the pod's
// container runtime: docker (docker://...) or containerd (containerd://...). This determines which parsing
// stage to use. However, any labels that begin with __ are not passed to loki.process (pipeline) stages.
// Use a relabeling rule to set a label that can be used as a LogQL selector in the stages below so the
// parser can be chosen automatically, then drop the temporary label in the loki.process stage.
// set the container runtime as a label
rule {
action = "replace"
source_labels = ["__meta_kubernetes_pod_container_id"]
regex = "^(\\S+):\\/\\/.+$"
replacement = "$1"
target_label = "tmp_container_runtime"
}
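// e.g. a container id of containerd://<id> yields tmp_container_runtime="containerd",
// and docker://<id> yields tmp_container_runtime="docker"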
// make all labels on the pod available to the pipeline as labels;
// they are dropped before the write via labelallow unless explicitly allowed
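// e.g. the pod label app.kubernetes.io/name is exposed as __meta_kubernetes_pod_label_app_kubernetes_io_name
// and becomes the label app_kubernetes_io_name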
rule {
action = "labelmap"
regex = "__meta_kubernetes_pod_label_(.+)"
}
// make all annotations on the pod available to the pipeline as labels;
// they are dropped before the write via labelallow unless explicitly allowed
rule {
action = "labelmap"
regex = "__meta_kubernetes_pod_annotation_(.+)"
}
}
// find eligible files on the worker
local.file_match "pods" {
path_targets = discovery.relabel.worker_logs.output
}
// tail the files
loki.source.file "pods" {
targets = local.file_match.pods.targets
forward_to = [loki.process.parse.receiver]
}
// parse the log based on the container runtime
loki.process "parse" {
forward_to = argument.forward_to.value
/*******************************************************************************
* Container Runtime Parsing
********************************************************************************/
// if the tmp_container_runtime label from above is containerd or cri-o, parse using the cri stage
stage.match {
selector = "{tmp_container_runtime=~\"containerd|cri-o\"}"
// the cri processing stage extracts the following k/v pairs: log, stream, time, flags
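// e.g. an illustrative CRI-formatted line:
//   2024-01-01T00:00:00.000000000Z stderr F this is the log message
// extracts time, stream="stderr", flags="F", and log="this is the log message"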
stage.cri {}
// Set the extracted flags and stream values as labels
stage.labels {
values = {
flags = "",
stream = "",
}
}
}
// if the tmp_container_runtime label from above is docker, parse using the docker stage
stage.match {
selector = "{tmp_container_runtime=\"docker\"}"
// the docker processing stage extracts the following k/v pairs: log, stream, time
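// e.g. an illustrative docker json-file line:
//   {"log":"this is the log message\n","stream":"stdout","time":"2024-01-01T00:00:00.000000000Z"}
// extracts time, stream="stdout", and log="this is the log message\n"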
stage.docker {}
// Set the extracted stream value as a label
stage.labels {
values = {
stream = "",
}
}
}
// drop the temporary container runtime label as it is no longer needed
stage.label_drop {
values = ["tmp_container_runtime"]
}
}
}
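// Example usage (a minimal sketch, not part of this module): instantiate from_worker with pod targets and
// forward its output to a Loki write endpoint. The component labels and the Loki URL below are assumptions
// for illustration only.
//
//   discovery.kubernetes "pods" {
//     role = "pod"
//   }
//
//   from_worker "default" {
//     targets    = discovery.kubernetes.pods.targets
//     forward_to = [loki.write.default.receiver]
//   }
//
//   loki.write "default" {
//     endpoint {
//       url = "http://loki:3100/loki/api/v1/push"
//     }
//   }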
// declare "api" {
//
// }
declare "filename_normalize" {
argument "forward_to" {
comment = "Must be a list(LogsReceiver) where collected logs should be forwarded to"
}
export "receiver" {
value = loki.process.normalize_filename.receiver
}
loki.process "normalize_filename" {
forward_to = argument.forward_to.value
/*******************************************************************************
* Normalize Filename
*******************************************************************************
Normalize the filename. The "filename" label is automatically created from the files discovered in the matching path, based on
the __path__ label set by the relabel rules above. It has extremely high cardinality; knowing which container/sidecar of a pod
a log came from is useful, but the cardinality can be greatly reduced by normalizing the value.
Example:
Filename: /var/log/pods/agents_agent-logs-grafana-agent-k8hpm_5cafa323-a7ed-4703-9220-640d3e44a5e3/config-reloader/0.log
Becomes: /var/log/pods/agents/agent-logs-grafana-agent/config-reloader.log
*/
stage.regex {
// unescaped regex: ^(?P<path>\/([^\/_]+\/)+)[^\/]+\/(?P<container_folder>[^\/]+)\/[0-9]+\.log
expression = "^(?P<path>\\/([^\\/_]+\\/)+)[^\\/]+\\/(?P<container_folder>[^\\/]+)\\/[0-9]+\\.log"
source = "filename"
}
stage.template {
source = "normalized_filename"
template = "{{ .path }}{{ .job }}/{{ .container_folder }}.log"
}
stage.labels {
values = {
filename = "normalized_filename",
}
}
}
}
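// Example usage (a minimal sketch, not part of this module): chain from_worker into filename_normalize so
// the high-cardinality filename label is rewritten before logs reach the write endpoint. The component
// labels below are assumptions for illustration only.
//
//   filename_normalize "default" {
//     forward_to = [loki.write.default.receiver]
//   }
//
//   from_worker "default" {
//     targets    = discovery.kubernetes.pods.targets
//     forward_to = [filename_normalize.default.receiver]
//   }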