Skip to content

Commit

Permalink
Issue #TG-375 feat: Added logback changes for LMS cassandra query events (#1724)
Browse files Browse the repository at this point in the history

* Issue #TG-234 feat: Added logback configuration for lms-service

* Issue #TG-234 feat: Added logback configuration for lms-service

* Issue #TG-234 feat: Added logback configuration for lms-service

* Issue #TG-234 feat: Added logback configuration for lms-service

* Issue #TG-234 feat: Added logback configuration for lms-service

* Issue #TG-234 feat: Added logback configuration for lms-service

* Issue #TG-375 feat: Added logback changes for cassandra query events

* Issue #TG-375 feat: Added kafka topic for query events logging

* Revert "Issue #TG-375 feat: Added kafka topic for query events logging"

This reverts commit 81e1734.
  • Loading branch information
Pradyumna authored Jul 17, 2020
1 parent c262466 commit 2d51cbb
Show file tree
Hide file tree
Showing 3 changed files with 106 additions and 4 deletions.
4 changes: 3 additions & 1 deletion ansible/roles/stack-sunbird/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,9 @@ sunbird_cert_qr_container_name: "certqr"

service_env:
learner: ../../../../ansible/roles/stack-sunbird/templates/sunbird_learner-service.env
lms: ../../../../ansible/roles/stack-sunbird/templates/sunbird_lms-service.env
lms:
- ../../../../ansible/roles/stack-sunbird/templates/sunbird_lms-service.env
- ../../../../ansible/roles/stack-sunbird/templates/lms-service_logback.xml
knowledgemw: ../../../../ansible/roles/stack-sunbird/templates/sunbird_knowledge-mw-service.env
apimanager: ../../../../ansible/roles/stack-api-manager/templates/api-manager.env
cert: ../../../../ansible/roles/stack-sunbird/templates/sunbird_cert-service.env
Expand Down
92 changes: 92 additions & 0 deletions ansible/roles/stack-sunbird/templates/lms-service_logback.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
<configuration>
<!-- Logback configuration for the LMS (Play) service: console logging plus two
     Kafka appenders — one for telemetry events, one for DB query events
     (per commit: Cassandra query event logging). Rendered as an Ansible
     template: ${sunbird_environment} and ${kafka_urls} are substituted at
     deploy time. -->

<conversionRule conversionWord="coloredLevel" converterClass="play.api.libs.logback.ColoredLevel" />

<!-- transaction-event-trigger START -->
<timestamp key="timestamp" datePattern="yyyy-MM-dd"/>
<!-- common transactions logs -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d %msg%n</pattern>
</encoder>
</appender>

<!-- Wrap STDOUT so console writes happen off the application threads -->
<appender name="ASYNCSTDOUT" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="STDOUT" />
</appender>


<logger name="play" level="INFO" />
<logger name="defaultLogger" level="INFO" />
<!-- Telemetry Loggers-->

<root level="INFO">
<appender-ref ref="ASYNCSTDOUT" />
</root>


<!-- Ships telemetry log lines (raw %msg only) to the environment's raw telemetry topic -->
<appender name="kafka-appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg</pattern>
</encoder>

<topic>${sunbird_environment}.telemetry.raw</topic>
<!-- NoKeyKeyingStrategy: messages are sent without a key, so Kafka distributes them across partitions -->
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<!-- AsynchronousDeliveryStrategy: sends in the background; the logging thread is not blocked on delivery -->
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=${kafka_urls}</producerConfig>
<!-- don't wait for a broker to ack the reception of a batch. -->
<producerConfig>acks=0</producerConfig>
<!-- wait up to 15000ms (linger.ms) and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=15000</producerConfig>
<!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>

<!-- there is no fallback <appender-ref>. If this appender cannot deliver, it will drop its messages. -->

</appender>

<!-- Ships DB query log lines to the environment's db.query.events topic;
     producer settings intentionally mirror kafka-appender above -->
<appender name="query-kafka-appender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>%msg</pattern>
</encoder>

<topic>${sunbird_environment}.db.query.events</topic>
<!-- NoKeyKeyingStrategy: messages are sent without a key, so Kafka distributes them across partitions -->
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
<!-- AsynchronousDeliveryStrategy: sends in the background; the logging thread is not blocked on delivery -->
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

<!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
<!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
<!-- bootstrap.servers is the only mandatory producerConfig -->
<producerConfig>bootstrap.servers=${kafka_urls}</producerConfig>
<!-- don't wait for a broker to ack the reception of a batch. -->
<producerConfig>acks=0</producerConfig>
<!-- wait up to 15000ms (linger.ms) and collect log messages before sending them as a batch -->
<producerConfig>linger.ms=15000</producerConfig>
<!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
<producerConfig>max.block.ms=0</producerConfig>
<!-- define a client-id that you use to identify yourself against the kafka broker -->
<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>

<!-- there is no fallback <appender-ref>. If this appender cannot deliver, it will drop its messages. -->

</appender>

<!-- Telemetry events go only to Kafka (additivity defaults to true, so they also reach root/console) -->
<logger name="TelemetryEventLogger" level="INFO">
<appender-ref ref="kafka-appender" />
</logger>

<!-- DEBUG so query events are emitted; presumably the service logs queries at DEBUG on "queryLogger" — TODO confirm against lms-service code -->
<logger name="queryLogger" level="DEBUG">
<appender-ref ref="query-kafka-appender" />
</logger>

</configuration>
14 changes: 11 additions & 3 deletions kubernetes/helm_charts/core/lms/templates/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -44,17 +44,25 @@ spec:
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 10 }}
{{- end }}
volumeMounts:
- name: {{ .Chart.Name }}-xml-config
mountPath: /home/sunbird/lms/lms-service-1.0-SNAPSHOT/config/logback.xml
subPath: lms-service_logback.xml
{{- $keys := .Files.Glob "keys/*" }}
{{- if $keys }}
volumeMounts:
- mountPath: {{ .Values.lms_device_basepath }}
name: access-keys
{{- end }}
volumes:
- name: {{ .Chart.Name }}-xml-config
configMap:
name: {{ .Chart.Name }}-xml-config
{{- $keys := .Files.Glob "keys/*" }}
{{- if $keys }}
- name: access-keys
secret:
secretName: lms-access-keys
{{ end }}

{{- end }}
---
apiVersion: v1
kind: Service
Expand Down

0 comments on commit 2d51cbb

Please sign in to comment.