-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yml
148 lines (142 loc) · 5.73 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
# Docker Compose stack for a Kafka + Neo4j streams workshop:
# Zeppelin (notebooks) -> Neo4j (with streams plugin) -> Kafka broker/ZooKeeper/Connect.
# NOTE(review): the top-level `version` key is obsolete in Compose v2+ and is
# ignored there, but harmless; kept for older docker-compose CLIs.
version: "3"
networks:
  # Single user-defined bridge network shared by every service below.
  kafka_workshop:
    driver: bridge
services:
  # Apache Zeppelin notebook server (image pre-built with a Neo4j interpreter).
  zeppelin:
    hostname: zeppelin
    container_name: zeppelin
    image: larusefraudy/zeppelin:0.9.0
    depends_on:
      - neo4j
    ports:
      - "8080:8080"  # Zeppelin web UI
      - "4040:4040"  # presumably the Spark driver UI — TODO confirm
    volumes:
      # Notebooks, config and the Neo4j interpreter are bind-mounted from the
      # host so edits survive container restarts.
      - ./zeppelin/notebook:/zeppelin/notebook
      - ./zeppelin/conf:/zeppelin/conf
      - ./zeppelin/interpreter/neo4j:/zeppelin/interpreter/neo4j
    networks:
      - kafka_workshop
neo4j:
platform: linux/amd64
image: neo4j:4.3-enterprise
hostname: neo4j
container_name: neo4j
ports:
- 7474:7474
- 7687:7687
depends_on:
- zookeeper
- broker
volumes:
- ./neo4j/plugins:/plugins
environment:
NEO4J_ACCEPT_LICENSE_AGREEMENT: "yes"
NEO4J_AUTH: neo4j/password
NEO4J_dbms_memory_heap_max__size: 2G
NEO4J_dbms_logs_debug_level: DEBUG
NEO4J_apoc_export_file_enabled: "true"
NEO4J_kafka_zookeeper_connect: zookeeper:2181
NEO4J_kafka_bootstrap_servers: broker:9093
NEO4J_kafka_client_id: "neo4jClient"
NEO4J_kafka_group_id: "neo4jGroup"
NEO4J_kafka_key_deserializer: org.apache.kafka.common.serialization.ByteArrayDeserializer
NEO4J_kafka_value_deserializer: org.apache.kafka.common.serialization.ByteArrayDeserializer
NEO4J_streams_source_enabled: "true"
NEO4J_streams_sink_enabled: "true"
NEO4J_streams_source_topic_nodes_customer: Customer{customerID,contactName,companyName}
NEO4J_streams_source_topic_nodes_order: Order{orderID,shipCity,shipAddress}
NEO4J_streams_source_topic_nodes_product: Product{productID,productName}
NEO4J_streams_source_topic_nodes_people: Person{*}
NEO4J_streams_source_topic_relationships_knows: KNOWS{*}
NEO4J_streams_source_topic_relationships_purchased: PURCHASED{*}
NEO4J_streams_source_topic_relationships_orders: ORDERS{quantity,unitPrice}
NEO4J_streams_sink_topic_cypher_sales: "
MERGE (c:Customer {customerID: event.customer.customerID})
ON CREATE SET c.contactName = event.customer.contactName,
c.companyName = event.customer.companyName
MERGE (o:Order {orderID: event.order.orderID})
ON CREATE SET o.shipCity = event.order.shipCity,
o.shipAddress = event.order.shipAddress,
o.orderDate = localdatetime()
MERGE (p:Product {productID: event.product.productID})
ON CREATE SET p.productName = event.product.productName
MERGE (c)-[:PURCHASED]->(o)-[os:ORDERS {quantity: event.product.quantity, unitPrice: event.product.unitPrice}]->(p)"
networks:
- kafka_workshop
  # Single-node ZooKeeper used by the Kafka broker for cluster metadata.
  zookeeper:
    image: confluentinc/cp-zookeeper
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"  # client port, also reachable from the host
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      # Base time unit (ms) for ZooKeeper heartbeats / session timeouts.
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      - kafka_workshop
  # Single Kafka broker (Confluent enterprise image, includes metrics reporter).
  broker:
    image: confluentinc/cp-enterprise-kafka
    hostname: broker
    container_name: broker
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"  # OUTSIDE listener, for clients on the host
    expose:
      - "9093"  # PLAINTEXT listener, only reachable inside the compose network
    environment:
      KAFKA_BROKER_ID: 1
      # Two listeners: PLAINTEXT (broker:9093) for in-network services
      # (neo4j, connect), OUTSIDE (localhost:9092) for host clients.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:9093,OUTSIDE://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9093,OUTSIDE://0.0.0.0:9092
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:9093
      # workaround if we change to a custom name the schema_registry fails to start
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      # Replication factor 1: single-broker dev setup, no redundancy.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      # NOTE(review): these CONNECT_* converter settings look like leftovers —
      # they configure Kafka Connect, not a broker, and appear unused here.
      # Verify before removing.
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      CONFLUENT_METRICS_ENABLE: "true"
      CONFLUENT_SUPPORT_CUSTOMER_ID: "anonymous"
    networks:
      - kafka_workshop
  # Kafka Connect worker; extra connector plugins are bind-mounted from the
  # host into /tmp/connect-plugins (see CONNECT_PLUGIN_PATH).
  connect:
    image: confluentinc/cp-kafka-connect
    hostname: connect
    container_name: connect
    depends_on:
      - zookeeper
      - broker
    ports:
      - "8083:8083"  # Connect REST API
    volumes:
      - ./kafka-connect/plugins:/tmp/connect-plugins
    environment:
      # Talks to the broker on its internal listener (9093).
      CONNECT_BOOTSTRAP_SERVERS: "broker:9093"
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      # Internal topics; replication factor 1 matches the single-broker setup.
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      # NOTE(review): CONNECT_INTERNAL_*_CONVERTER settings are deprecated in
      # newer Connect versions and ignored there — confirm against the image
      # version before relying on them.
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONNECT_PLUGIN_PATH: /usr/share/java,/tmp/connect-plugins,/usr/share/confluent-hub-components
      CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=DEBUG,org.I0Itec.zkclient=DEBUG,org.reflections=ERROR,org.apache.kafka.connect.transforms
    networks:
      - kafka_workshop