Added support for confluent schema-registry (#12)
* Added support for confluent schema-registry

- Fixes #9
- Updated CHANGELOG
neoword authored Jul 18, 2018
1 parent 2282b29 commit b582231
Showing 7 changed files with 94 additions and 53 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG.md
@@ -1,11 +1,12 @@
# CHANGELOG

## v0.3
* Add support for confluent schema-registry (#12, #9)
* Clean kafka dir scripts added (#11)
* Add meta.constraint that is Kafka specific (#10)

## v0.2
* Add support for confluent kafka ( #7 )
* Add support for confluent kafka (#7)

## v0.1
* Initial version
18 changes: 18 additions & 0 deletions README.md
@@ -26,17 +26,35 @@ Ensure the docker images are pre-loaded on each node.
## Confluent Zookeeper

```
> # to clean out zookeeper data dirs
> bin/clean-zk-dirs.sh
> # to start zookeeper
> bin/start.sh zk
> # to stop zookeeper
> bin/stop.sh zk
```

## Confluent Kafka

```
> # to clean out kafka data dirs
> bin/clean-kafka-dirs.sh
> # to start kafka
> bin/start.sh kafka
> # to stop kafka
> bin/stop.sh kafka
```

## Confluent Schema-Registry

```
> # to start schema-registry
> bin/start.sh schema-registry
> # to stop schema-registry
> bin/stop.sh schema-registry
```
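
Once the registry task is running, a quick sanity check is to hit its REST API. This is a minimal sketch, assuming the registry is reachable at node2:8081 as configured in jobs/schema-registry.hcl; `/subjects` and `/config` are standard Confluent Schema Registry REST endpoints:

```
> # list registered subjects (empty on a fresh install)
> curl http://node2:8081/subjects
> # show the global compatibility level set by the job's env template
> curl http://node2:8081/config
```
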
7 changes: 7 additions & 0 deletions bin/clean-zk-dirs.sh
@@ -0,0 +1,7 @@
#!/bin/bash

NODES='node2 node3 node4'
for node in ${NODES}; do
NODE_ID=`vagrant global-status | grep ${node} | awk '{print $1}'`
vagrant ssh ${NODE_ID} -c "sudo rm -rf /opt/zookeeper"
done
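
As a side note, the loop above assumes `vagrant global-status` returns a match for every node; if the grep comes back empty, the `vagrant ssh` call fails with an unhelpful error. A minimal defensive variation (not part of this commit, shown only as a sketch) could guard against that:

```
#!/bin/bash

# Sketch: same cleanup as clean-zk-dirs.sh, but skip nodes Vagrant does not know about.
NODES='node2 node3 node4'
for node in ${NODES}; do
  NODE_ID=$(vagrant global-status | grep "${node}" | awk '{print $1}')
  if [[ -z "${NODE_ID}" ]]; then
    echo "skipping ${node}: not found in vagrant global-status" >&2
    continue
  fi
  vagrant ssh "${NODE_ID}" -c "sudo rm -rf /opt/zookeeper"
done
```
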
8 changes: 6 additions & 2 deletions bin/pull-images.sh
@@ -1,8 +1,12 @@
#!/bin/bash

NODES='node2 node3 node4'
VERSION='4.1.1-2'
for node in ${NODES}; do
NODE_ID=`vagrant global-status | grep ${node} | awk '{print $1}'`
vagrant ssh ${NODE_ID} -c "docker pull confluentinc/cp-zookeeper:4.1.1-2"
vagrant ssh ${NODE_ID} -c "docker pull confluentinc/cp-kafka:4.1.1-2"
vagrant ssh ${NODE_ID} -c "docker pull confluentinc/cp-zookeeper:${VERSION}"
vagrant ssh ${NODE_ID} -c "docker pull confluentinc/cp-kafka:${VERSION}"
if [[ "${node}" =~ "node2" ]]; then
vagrant ssh ${NODE_ID} -c "docker pull confluentinc/cp-schema-registry:${VERSION}"
fi
done
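
To confirm the pull succeeded, the images can be spot-checked on each node with the same lookup pattern; a minimal sketch, using plain `docker images` on each VM:

```
#!/bin/bash

# Sketch: list the Confluent images present on each node after pull-images.sh has run.
NODES='node2 node3 node4'
for node in ${NODES}; do
  NODE_ID=$(vagrant global-status | grep "${node}" | awk '{print $1}')
  vagrant ssh "${NODE_ID}" -c "docker images | grep confluentinc"
done
```
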
19 changes: 11 additions & 8 deletions jobs/kafka.hcl
@@ -1,4 +1,5 @@
# Zookeeper
# Nomad job for kafka
# DISCLAIMER: This is intended for learning purposes only. It has not been tested for PRODUCTION environments.
job "kafka" {
region = "global"
datacenters = ["dc1"]
@@ -48,18 +49,19 @@ job "kafka" {
template {
data = <<EOT
# generated at deployment
CONFLUENT_VERSION = 4.1.1-2
{{$i := env "NOMAD_ALLOC_INDEX"}}
KAFKA_BROKER_ID = {{$i | parseInt | add 1}}
KAFKA_ZOOKEEPER_CONNECT = node2:2181,node3:2181,node4:2181
KAFKA_ADVERTISED_HOSTNAME = {{if eq $i "0"}}node2{{else}}{{if eq $i "1"}}node3{{else}}node4{{end}}{{end}}
KAFKA_ADVERTISED_LISTENERS = PLAINTEXT://node{{$i | parseInt | add 2}}:9092
KAFKA_DEFAULT_REPLICATION_FACTOR = 3
KAFKA_BROKER_ID={{$i | parseInt | add 1}}
KAFKA_ZOOKEEPER_CONNECT=node2:2181,node3:2181,node4:2181
KAFKA_ADVERTISED_HOSTNAME={{if eq $i "0"}}node2{{else}}{{if eq $i "1"}}node3{{else}}node4{{end}}{{end}}
KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://node{{$i | parseInt | add 2}}:9092
KAFKA_DEFAULT_REPLICATION_FACTOR=3
EOT
destination = "kafka-env/kafka.env"
env = true
}
config {
image = "confluentinc/cp-kafka:4.1.1-2"
image = "confluentinc/cp-kafka:${CONFLUENT_VERSION}"
hostname = "${KAFKA_ADVERTISED_HOSTNAME}"
labels {
group = "confluent-kafka"
@@ -82,13 +84,14 @@ job "kafka" {
cpu = 1000
memory = 512
network {
mbits = 10
mbits = 5
port "kafka" {
static = 9092
}
}
}
service {
name = "kafka"
tags = ["kafka"]
port = "kafka"
address_mode = "driver"
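
For a concrete picture of what the template above produces, the rendered kafka.env for allocation index 0 (which the template maps to node2) should look roughly like this; the other two allocations follow the same pattern with broker IDs 2 and 3:

```
# generated at deployment
CONFLUENT_VERSION = 4.1.1-2
KAFKA_BROKER_ID=1
KAFKA_ZOOKEEPER_CONNECT=node2:2181,node3:2181,node4:2181
KAFKA_ADVERTISED_HOSTNAME=node2
KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://node2:9092
KAFKA_DEFAULT_REPLICATION_FACTOR=3
```
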
69 changes: 35 additions & 34 deletions jobs/schema-registry.hcl
@@ -1,5 +1,7 @@
# Zookeeper
job "kafka" {
# Nomad job for schema-registry
# DISCLAIMER: This is intended for learning purposes only. It has not been tested for PRODUCTION environments.

job "schema-registry" {
region = "global"
datacenters = ["dc1"]
type = "service"
@@ -19,17 +21,17 @@ job "kafka" {
# TODO - Need to add meta.kafka to be kafka specific (#8)
# TODO - Right now piggy-back on ZK meta. Need to add a separate, distinct kafka meta.
constraint {
attribute = "${meta.zookeeper}"
attribute = "${meta.schema-registry}"
value = "true"
}

# define group
group "kafka-group" {
group "sr-group" {

# define the number of times the tasks need to be executed
count = 3
count = 1

# ensure we are on 3 different nodes
# ensure we are on different nodes
constraint {
operator = "distinct_hosts"
value = "true"
@@ -43,24 +45,23 @@ job "kafka" {
mode = "fail"
}

task "kafka" {
task "schema-registry" {
driver = "docker"
template {
data = <<EOT
# generated at deployment
{{$i := env "NOMAD_ALLOC_INDEX"}}
KAFKA_BROKER_ID = {{$i | parseInt | add 1}}
KAFKA_ZOOKEEPER_CONNECT = node2:2181,node3:2181,node4:2181
KAFKA_ADVERTISED_HOSTNAME = {{if eq $i "0"}}node2{{else}}{{if eq $i "1"}}node3{{else}}node4{{end}}{{end}}
KAFKA_ADVERTISED_LISTENERS = PLAINTEXT://node{{$i | parseInt | add 2}}:9092
KAFKA_DEFAULT_REPLICATION_FACTOR = 3
CONFLUENT_VERSION=4.1.1-2
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL=192.168.33.11:2181,192.168.33.12:2181,192.168.33.13:2181
SCHEMA_REGISTRY_LISTENERS=http://0.0.0.0:8081
SCHEMA_REGISTRY_DEBUG=true
SCHEMA_REGISTRY_HOST_NAME=node2
SCHEMA_REGISTRY_AVRO_COMPATIBILITY_LEVEL=backward_transitive
EOT
destination = "kafka-env/kafka.env"
destination = "schema-registry-env/schema-registry.env"
env = true
}
config {
image = "confluentinc/cp-kafka:4.1.1-2"
hostname = "${KAFKA_ADVERTISED_HOSTNAME}"
image = "confluentinc/cp-schema-registry:${CONFLUENT_VERSION}"
labels {
group = "confluent-schema-registry"
}
@@ -71,39 +72,39 @@ job "kafka" {
"node4:192.168.33.13"
]
port_map {
kafka = 8081
sr = 8081
}
volumes = [
"/opt/schema-registry/data:/var/lib/schema-registry/data",
"/opt/schema-registry/secrets:/etc/schema-registry/secrets"
]
}
resources {
cpu = 1000
memory = 512
cpu = 200
memory = 256
network {
mbits = 10
mbits = 1
port "sr" {
static = 9092
static = 8081
}
}
}
service {
name = "schema-registry"
tags = ["schema-registry"]
port = "sr"
address_mode = "driver"
# TODO - Need to add a health check
# check {
# type = "tcp"
# port = "sr"
# interval = "10s"
# timeout = "2s"
# check_restart {
# limit = 3
# grace = "90s"
# ignore_warnings = false
# }
# }
check {
type = "http"
port = "sr"
path = "/"
interval = "10s"
timeout = "2s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
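
Deploying and checking the new job follows the usual Nomad workflow; a short sketch, assuming the Nomad cluster from this repo is up and at least one client has `meta.schema-registry = "true"` set to satisfy the constraint above:

```
> nomad job run jobs/schema-registry.hcl
> nomad job status schema-registry
> # same endpoint the http check above probes
> curl http://node2:8081/
```
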
23 changes: 15 additions & 8 deletions jobs/zookeeper.hcl
@@ -1,4 +1,6 @@
# Zookeeper
# Nomad job for zookeeper
# DISCLAIMER: This is intended for learning purposes only. It has not been tested for PRODUCTION environments.

job "zookeeper" {
region = "global"
datacenters = ["dc1"]
@@ -44,18 +46,22 @@ job "zookeeper" {
template {
data = <<EOT
# generated at deployment
CONFLUENT_VERSION = 4.1.1-2
{{$i := env "NOMAD_ALLOC_INDEX"}}
ZOOKEEPER_SERVER_ID = {{$i | parseInt | add 1}}
ZOOKEEPER_SERVERS = {{if eq $i "0"}}0.0.0.0:2888:3888;192.168.33.12:2888:3888;192.168.33.13:2888:3888{{else}}{{if eq $i "1"}}192.168.33.11:2888:3888;0.0.0.0:2888:3888;192.168.33.13:2888:3888{{else}}192.168.33.11:2888:3888;192.168.33.12:2888:3888;0.0.0.0:2888:3888{{end}}{{end}}
ZOOKEEPER_HOST = {{if eq $i "0"}}node2{{else}}{{if eq $i "1"}}node3{{else}}node4{{end}}{{end}}
ZOOKEEPER_IP = {{if eq $i "0"}}192.168.33.11{{else}}{{if eq $i "1"}}192.168.33.12{{else}}192.168.33.13{{end}}{{end}}
ZOOKEEPER_CLIENT_PORT = 2181
ZOOKEEPER_SERVER_ID={{$i | parseInt | add 1}}
ZOOKEEPER_SERVERS={{if eq $i "0"}}0.0.0.0:2888:3888;192.168.33.12:2888:3888;192.168.33.13:2888:3888{{else}}{{if eq $i "1"}}192.168.33.11:2888:3888;0.0.0.0:2888:3888;192.168.33.13:2888:3888{{else}}192.168.33.11:2888:3888;192.168.33.12:2888:3888;0.0.0.0:2888:3888{{end}}{{end}}
ZOOKEEPER_HOST={{if eq $i "0"}}node2{{else}}{{if eq $i "1"}}node3{{else}}node4{{end}}{{end}}
ZOOKEEPER_IP={{if eq $i "0"}}192.168.33.11{{else}}{{if eq $i "1"}}192.168.33.12{{else}}192.168.33.13{{end}}{{end}}
ZOOKEEPER_CLIENT_PORT=2181
ZOOKEEPER_TICK_TIME=2000
ZOOKEEPER_SYNC_LIMIT=20
ZOOKEEPER_INIT_LIMIT=10
EOT
destination = "zk-env/zookeeper.env"
env = true
}
config {
image = "confluentinc/cp-zookeeper:4.1.1-2"
image = "confluentinc/cp-zookeeper:${CONFLUENT_VERSION}"
hostname = "${ZOOKEEPER_HOST}"
labels {
group = "confluent-zk"
@@ -80,7 +86,7 @@ job "zookeeper" {
cpu = 200
memory = 256
network {
mbits = 10
mbits = 1
port "zk" {
static = 2181
}
@@ -93,6 +99,7 @@ job "zookeeper" {
}
}
service {
name = "zookeeper"
tags = ["zookeeper"]
port = "zk"
address_mode = "driver"
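
As with the kafka job, the rendered zookeeper.env for allocation index 0 (node2, 192.168.33.11) should come out roughly as follows, with the 0.0.0.0 entry shifting position for the other two allocations:

```
# generated at deployment
CONFLUENT_VERSION = 4.1.1-2
ZOOKEEPER_SERVER_ID=1
ZOOKEEPER_SERVERS=0.0.0.0:2888:3888;192.168.33.12:2888:3888;192.168.33.13:2888:3888
ZOOKEEPER_HOST=node2
ZOOKEEPER_IP=192.168.33.11
ZOOKEEPER_CLIENT_PORT=2181
ZOOKEEPER_TICK_TIME=2000
ZOOKEEPER_SYNC_LIMIT=20
ZOOKEEPER_INIT_LIMIT=10
```
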
