Temporarily remove profileStatusPerNode until OTel supports resetting metrics
UgOrange committed Nov 13, 2024
1 parent 1f358f6 commit 6385f04
Showing 5 changed files with 55 additions and 55 deletions.
12 changes: 6 additions & 6 deletions cmd/varmor/main.go
@@ -65,10 +65,10 @@ var (
     webhookMatchLabel string
     bpfExclusiveMode  bool
     enableMetrics     bool
-    syncMetricsSecond int
-    statusUpdateCycle time.Duration
-    auditLogPaths     string
-    setupLog          = log.Log.WithName("SETUP")
+    //syncMetricsSecond int
+    statusUpdateCycle time.Duration
+    auditLogPaths     string
+    setupLog          = log.Log.WithName("SETUP")
 )

 func main() {
@@ -92,7 +92,7 @@ func main() {
     flag.DurationVar(&statusUpdateCycle, "statusUpdateCycle", time.Hour*2, "Configure the status update cycle for VarmorPolicy and ArmorProfile")
     flag.StringVar(&auditLogPaths, "auditLogPaths", "/var/log/audit/audit.log|/var/log/kern.log", "Configure the file search list to select the audit log file and read the AppArmor and Seccomp audit events. Please use a vertical bar to separate the file paths, the first valid file will be used to track the audit events.")
     flag.BoolVar(&enableMetrics, "enableMetrics", false, "Set this flag to enable metrics.")
-    flag.IntVar(&syncMetricsSecond, "syncMetricsSecond", 10, "Configure the profile metric update seconds")
+    //flag.IntVar(&syncMetricsSecond, "syncMetricsSecond", 10, "Configure the profile metric update seconds")
     flag.Parse()

     // Set the webhook matchLabels configuration.
@@ -150,7 +150,7 @@ func main() {
     }

     // init a metrics
-    metricsModule := metrics.NewMetricsModule(log.Log.WithName("METRICS"), enableMetrics, syncMetricsSecond)
+    metricsModule := metrics.NewMetricsModule(log.Log.WithName("METRICS"), enableMetrics, 10)

     if agent {
         setupLog.Info("vArmor agent startup")
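
With the flag gone, the metrics refresh interval is hardcoded to 10 seconds at this call site. For orientation, here is a minimal sketch of the module surface these call sites imply (NewMetricsModule, RegisterFloat64Counter, and the Refresh field that status.go reads) — an assumption-laden reconstruction, not the actual pkg/metrics source:

    // Sketch inferred from call sites in this commit; the real vArmor implementation may differ.
    package metrics

    import (
        "github.com/go-logr/logr"
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    type MetricsModule struct {
        meter   metric.Meter
        log     logr.Logger
        enabled bool
        Refresh int // seconds between metric syncs; fixed at 10 by this commit
    }

    func NewMetricsModule(log logr.Logger, enabled bool, refresh int) *MetricsModule {
        return &MetricsModule{
            meter:   otel.Meter("varmor"),
            log:     log,
            enabled: enabled,
            Refresh: refresh,
        }
    }

    // RegisterFloat64Counter creates a cumulative counter; registration errors are logged, not fatal.
    func (m *MetricsModule) RegisterFloat64Counter(name, desc string) metric.Float64Counter {
        c, err := m.meter.Float64Counter(name, metric.WithDescription(desc))
        if err != nil {
            m.log.Error(err, "failed to register counter", "name", name)
        }
        return c
    }
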
34 changes: 17 additions & 17 deletions internal/status/api/v1/manager.go
@@ -52,21 +52,21 @@ type StatusManager struct {
     PolicyStatuses map[string]varmortypes.PolicyStatus
     // Use "namespace/VarmorPolicyName" as key. One VarmorPolicy object corresponds to one ModelingStatus
     // TODO: Rebuild ModelingStatuses from ArmorProfile object when leader change occurs.
-    ModelingStatuses     map[string]varmortypes.ModelingStatus
-    ResetCh              chan string
-    DeleteCh             chan string
-    UpdateStatusCh       chan string
-    UpdateModeCh         chan string
-    statusQueue          workqueue.RateLimitingInterface
-    dataQueue            workqueue.RateLimitingInterface
-    statusUpdateCycle    time.Duration
-    debug                bool
-    log                  logr.Logger
-    metricsModule        *varmormetrics.MetricsModule
-    profileSuccess       metric.Float64Counter
-    profileFailure       metric.Float64Counter
-    profileChangeCount   metric.Float64Counter
-    profileStatusPerNode metric.Float64Gauge
+    ModelingStatuses   map[string]varmortypes.ModelingStatus
+    ResetCh            chan string
+    DeleteCh           chan string
+    UpdateStatusCh     chan string
+    UpdateModeCh       chan string
+    statusQueue        workqueue.RateLimitingInterface
+    dataQueue          workqueue.RateLimitingInterface
+    statusUpdateCycle  time.Duration
+    debug              bool
+    log                logr.Logger
+    metricsModule      *varmormetrics.MetricsModule
+    profileSuccess     metric.Float64Counter
+    profileFailure     metric.Float64Counter
+    profileChangeCount metric.Float64Counter
+    //profileStatusPerNode metric.Float64Gauge
 }

 func NewStatusManager(coreInterface corev1.CoreV1Interface,
@@ -99,7 +99,7 @@ func NewStatusManager(coreInterface corev1.CoreV1Interface,
         m.profileSuccess = metricsModule.RegisterFloat64Counter("profile_processing_success", "Number of successful profile processing")
         m.profileFailure = metricsModule.RegisterFloat64Counter("profile_processing_failure", "Number of failed profile processing")
         m.profileChangeCount = metricsModule.RegisterFloat64Counter("profile_change_count", "Number of profile change")
-        m.profileStatusPerNode = metricsModule.RegisterFloat64Gauge("profile_status_per_node", "Profile status per node (1=success, 0=failure)")
+        //m.profileStatusPerNode = metricsModule.RegisterFloat64Gauge("profile_status_per_node", "Profile status per node (1=success, 0=failure)")
     }

     return &m
@@ -653,7 +653,7 @@ func (m *StatusManager) Run(stopCh <-chan struct{}) {
     go m.reconcileStatus(stopCh)
     go wait.Until(m.statusWorker, time.Second, stopCh)
     go wait.Until(m.dataWorker, time.Second, stopCh)
-    go m.syncStatusMetricsLoop()
+    //go m.syncStatusMetricsLoop()
     <-stopCh
 }

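
The three counters stay registered: an OTel counter is cumulative and never needs resetting, so only the last-value gauge has to be parked. A hypothetical helper (recordSuccess does not exist in the source) showing how the surviving counters are driven, mirroring the profileChangeCount.Add call in status.go:

    // Hypothetical helper, not in this commit. Uses the file's existing imports:
    // context, go.opentelemetry.io/otel/attribute, go.opentelemetry.io/otel/metric.
    func (m *StatusManager) recordSuccess(namespace, name string) {
        attrSet := attribute.NewSet(
            attribute.String("namespace", namespace),
            attribute.String("profile_name", name),
        )
        m.profileSuccess.Add(context.Background(), 1, metric.WithAttributeSet(attrSet))
    }
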
61 changes: 31 additions & 30 deletions internal/status/api/v1/status.go
@@ -88,36 +88,37 @@ func (m *StatusManager) HandleProfileStatusUpdate(status varmortypes.ProfileStat
     m.profileChangeCount.Add(ctx, 1, metric.WithAttributeSet(attrSet))
 }

-func (m *StatusManager) syncStatusMetricsLoop() {
-    ctx := context.Background()
-    for {
-        time.Sleep(time.Duration(m.metricsModule.Refresh) * time.Second)
-        logger := m.log.WithName("syncStatusMetricsLoop()")
-        logger.Info("start syncing status metrics")
-        m.profileStatusPerNode = m.metricsModule.RegisterFloat64Gauge("profile_status_per_node", "Profile status per node (1=success, 0=failure)")
-        for key, status := range m.PolicyStatuses {
-            namespace, name, err := PolicyStatusKeyGetInfo(key)
-            if err != nil {
-                logger.Error(err, "PolicyStatusKeyGetInfo()")
-                continue
-            }
-            for nodeName, nodeMessage := range status.NodeMessages {
-                labels := []attribute.KeyValue{
-                    attribute.String("namespace", namespace),
-                    attribute.String("profile_name", name),
-                    attribute.String("node_name", nodeName),
-                    attribute.Int64("timestamp", time.Now().Unix()),
-                }
-                attrSet := attribute.NewSet(labels...)
-                if nodeMessage == string(varmortypes.ArmorProfileReady) {
-                    m.profileStatusPerNode.Record(ctx, 1, metric.WithAttributeSet(attrSet)) // 1 means success
-                } else {
-                    m.profileStatusPerNode.Record(ctx, 0, metric.WithAttributeSet(attrSet)) // 0 means failure
-                }
-            }
-        }
-    }
-}
+// disable syncStatusMetricsLoop until otel supports clearing metrics
+//func (m *StatusManager) syncStatusMetricsLoop() {
+//    ctx := context.Background()
+//    for {
+//        time.Sleep(time.Duration(m.metricsModule.Refresh) * time.Second)
+//        logger := m.log.WithName("syncStatusMetricsLoop()")
+//        logger.Info("start syncing status metrics")
+//        m.profileStatusPerNode = m.metricsModule.RegisterFloat64Gauge("profile_status_per_node", "Profile status per node (1=success, 0=failure)")
+//        for key, status := range m.PolicyStatuses {
+//            namespace, name, err := PolicyStatusKeyGetInfo(key)
+//            if err != nil {
+//                logger.Error(err, "PolicyStatusKeyGetInfo()")
+//                continue
+//            }
+//            for nodeName, nodeMessage := range status.NodeMessages {
+//                labels := []attribute.KeyValue{
+//                    attribute.String("namespace", namespace),
+//                    attribute.String("profile_name", name),
+//                    attribute.String("node_name", nodeName),
+//                    attribute.Int64("timestamp", time.Now().Unix()),
+//                }
+//                attrSet := attribute.NewSet(labels...)
+//                if nodeMessage == string(varmortypes.ArmorProfileReady) {
+//                    m.profileStatusPerNode.Record(ctx, 1, metric.WithAttributeSet(attrSet)) // 1 means success
+//                } else {
+//                    m.profileStatusPerNode.Record(ctx, 0, metric.WithAttributeSet(attrSet)) // 0 means failure
+//                }
+//            }
+//        }
+//    }
+//}

 // updatePolicyStatus update StatusManager.PolicyStatuses[statusKey] with profileStatus which comes from agent.
 func (m *StatusManager) updatePolicyStatus(statusKey string, profileStatus *varmortypes.ProfileStatus) error {
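
Why the loop had to go: a synchronous Float64Gauge in the OpenTelemetry Go SDK keeps the last value recorded for each attribute set, and the SDK has no API to delete or reset a series; re-registering the gauge every cycle, as this loop did, clears nothing. A policy or node that disappears therefore keeps exporting its stale status, which is what "until OTel supports resetting metrics" refers to. The usual workaround is an asynchronous observable gauge, whose series are rebuilt from a callback at every collection, so anything the callback stops reporting vanishes on the next scrape. A sketch of that alternative, not part of this commit — it assumes PolicyStatuses can be read safely from the collection goroutine, and it drops the original timestamp attribute, which would have made every observation a unique series:

    // Sketch only: an observable gauge in place of the disabled loop.
    // Relies on the file's existing imports (context, attribute, metric, varmortypes).
    func (m *StatusManager) registerProfileStatusGauge(meter metric.Meter) error {
        _, err := meter.Float64ObservableGauge(
            "profile_status_per_node",
            metric.WithDescription("Profile status per node (1=success, 0=failure)"),
            metric.WithFloat64Callback(func(ctx context.Context, o metric.Float64Observer) error {
                for key, status := range m.PolicyStatuses {
                    namespace, name, err := PolicyStatusKeyGetInfo(key)
                    if err != nil {
                        continue // skip malformed keys; the old loop logged and continued
                    }
                    for nodeName, nodeMessage := range status.NodeMessages {
                        value := 0.0 // 0 means failure
                        if nodeMessage == string(varmortypes.ArmorProfileReady) {
                            value = 1.0 // 1 means success
                        }
                        o.Observe(value, metric.WithAttributes(
                            attribute.String("namespace", namespace),
                            attribute.String("profile_name", name),
                            attribute.String("node_name", nodeName),
                        ))
                    }
                }
                return nil
            }),
        )
        return err
    }
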
1 change: 0 additions & 1 deletion manifests/varmor/templates/deployments/manager.yaml
@@ -60,7 +60,6 @@ spec:
         {{- if .Values.metrics.enabled }}
         {{- with .Values.manager.metrics.args }}
         {{- toYaml . | nindent 8 }}
-        - --syncMetricsSecond={{ $.Values.metrics.syncMetricsSecond }}
         {{- end }}
         {{- end }}
         {{- end }}
2 changes: 1 addition & 1 deletion manifests/varmor/values.yaml
@@ -18,7 +18,7 @@ bpfLsmEnforcer:
 metrics:
   enabled: false
   serviceMonitorEnabled: false
-  syncMetricsSecond: 10
+  # syncMetricsSecond: 10

 restartExistWorkloads:
   enabled: true
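
With syncMetricsSecond parked, enabling metrics needs only the keys that remain. A minimal override (my-values.yaml is a placeholder name; the keys come from manifests/varmor/values.yaml, and serviceMonitorEnabled presumably gates a ServiceMonitor resource that needs the Prometheus Operator CRDs):

    # my-values.yaml — example override for this chart
    metrics:
      enabled: true
      serviceMonitorEnabled: true

applied with something like helm upgrade --install varmor ./manifests/varmor -f my-values.yaml (release name assumed).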
