Add complete producer and consumer examples
Ishiihara committed Apr 23, 2015
1 parent 3fd4551 commit 4d49946
Showing 8 changed files with 664 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -0,0 +1,3 @@
target
.idea/
*.iml
58 changes: 58 additions & 0 deletions README.md
@@ -0,0 +1,58 @@
Examples
========

This repository includes projects demonstrating how to use the Java Kafka producer
and consumer. You can find a detailed explanation of the code in the
[application development section](http://confluent.io/docs/current/app-development.html)
of the Confluent Platform documentation.

To build the producer project:

$ cd producer
$ mvn clean package

To build the consumer project:

$ cd consumer
$ mvn clean package

Quickstart
----------

Before running the examples, make sure that Zookeeper, Kafka and Schema Registry
are running. In what follows, we assume that all three services were started with
their default settings.

# Start Zookeeper
$ bin/zookeeper-server-start config/zookeeper.properties

# Start Kafka
$ bin/kafka-server-start config/server.properties

# Start Schema Registry
$ bin/schema-registry-start config/schema-registry.properties

Then create a topic called page_visits:

# Create page_visits topic
$ bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 \
--partitions 1 --topic page_visits
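
You can optionally verify that the topic was created by listing the topics
registered in Zookeeper:

    # List topics
    $ bin/kafka-topics.sh --list --zookeeper localhost:2181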

Next, cd to the `examples` directory:

$ cd examples

Then cd to the `producer` directory and run the example producer to publish 10 records.

# Run the producer
$ cd producer
$ mvn exec:java -Dexec.mainClass="io.confluent.examples.producer.ProducerExample" \
-Dexec.args="10 http://localhost:8081"

Next, cd to the `consumer` directory and run the consumer group example to consume
the records we just published and display them in the console.

# Run the consumer
$ cd ../consumer
$ mvn exec:java -Dexec.mainClass="io.confluent.examples.consumer.ConsumerGroupExample" \
-Dexec.args="localhost:2181 group page_visits 1 http://localhost:8081"
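
If all three services are running, the consumer should print one line per record
in the format used by `ConsumerLogic` (the values below are illustrative, and the
message body depends on the producer's Avro schema):

    Thread 0 received: Topic page_visits Partition 0 Offset 0 Key null Message {...}
    Thread 0 received: Topic page_visits Partition 0 Offset 1 Key null Message {...}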
65 changes: 65 additions & 0 deletions consumer/pom.xml
@@ -0,0 +1,65 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<groupId>io.confluent</groupId>
<artifactId>consumer-example</artifactId>
<packaging>jar</packaging>
<version>2.0-SNAPSHOT</version>

<repositories>
<repository>
<id>confluent</id>
<url>http://packages.confluent.io/maven/</url>
</repository>
</repositories>

<properties>
<kafka.version>0.8.2.1</kafka.version>
<kafka.scala.version>2.10</kafka.scala.version>
<confluent.version>1.0</confluent.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>

<dependencies>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-avro-serializer</artifactId>
<version>${confluent.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_${kafka.scala.version}</artifactId>
<version>${kafka.version}</version>
</dependency>
</dependencies>

<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.5.1</version>
<inherited>true</inherited>
<configuration>
<source>1.6</source>
<target>1.6</target>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2.1</version>
<executions>
<execution>
<goals>
<goal>java</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
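
The `exec-maven-plugin` declared above is what enables the `mvn exec:java`
invocation used in the Quickstart; the full consumer launch, with the same
arguments as in the README, looks like this:

    $ cd consumer
    $ mvn exec:java -Dexec.mainClass="io.confluent.examples.consumer.ConsumerGroupExample" \
    -Dexec.args="localhost:2181 group page_visits 1 http://localhost:8081"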
121 changes: 121 additions & 0 deletions consumer/src/main/java/io/confluent/examples/consumer/ConsumerGroupExample.java
@@ -0,0 +1,121 @@
/**
* Copyright 2015 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.confluent.examples.consumer;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import io.confluent.kafka.serializers.KafkaAvroDecoder;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.utils.VerifiableProperties;

public class ConsumerGroupExample {
private final ConsumerConnector consumer;
private final String topic;
private ExecutorService executor;
private String zookeeper;
private String groupId;
private String url;

public ConsumerGroupExample(String zookeeper, String groupId, String topic, String url) {
consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
new ConsumerConfig(createConsumerConfig(zookeeper, groupId, url)));
this.topic = topic;
this.zookeeper = zookeeper;
this.groupId = groupId;
this.url = url;
}

  private Properties createConsumerConfig(String zookeeper, String groupId, String url) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper);
    props.put("group.id", groupId);
    // Offsets are never committed in this example, so with "smallest" every
    // run re-reads the topic from the beginning.
    props.put("auto.commit.enable", "false");
    props.put("auto.offset.reset", "smallest");
    // Needed by KafkaAvroDecoder to fetch schemas for the consumed records.
    props.put("schema.registry.url", url);

    return props;
  }

public void run(int numThreads) {
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put(topic, numThreads);

Properties props = createConsumerConfig(zookeeper, groupId, url);
VerifiableProperties vProps = new VerifiableProperties(props);

    // Create a single Avro decoder, used to decode both message keys and values
KafkaAvroDecoder avroDecoder = new KafkaAvroDecoder(vProps);

Map<String, List<KafkaStream<Object, Object>>> consumerMap =
consumer.createMessageStreams(topicCountMap, avroDecoder, avroDecoder);
List<KafkaStream<Object, Object>> streams = consumerMap.get(topic);

// Launch all the threads
executor = Executors.newFixedThreadPool(numThreads);

// Create ConsumerLogic objects and bind them to threads
int threadNumber = 0;
    for (final KafkaStream<Object, Object> stream : streams) {
executor.submit(new ConsumerLogic(stream, threadNumber));
threadNumber++;
}
}

  public void shutdown() {
    if (consumer != null) consumer.shutdown();
    if (executor != null) {
      executor.shutdown();
      try {
        if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
          System.out.println(
              "Timed out waiting for consumer threads to shut down, exiting uncleanly");
        }
      } catch (InterruptedException e) {
        System.out.println("Interrupted during shutdown, exiting uncleanly");
      }
    }
  }

public static void main(String[] args) {
if (args.length != 5) {
System.out.println("Please provide command line arguments: "
+ "zookeeper groupId topic threads schemaRegistryUrl");
System.exit(-1);
}

String zooKeeper = args[0];
String groupId = args[1];
String topic = args[2];
int threads = Integer.parseInt(args[3]);
String url = args[4];

ConsumerGroupExample example = new ConsumerGroupExample(zooKeeper, groupId, topic, url);
example.run(threads);

    try {
      // Give the consumer threads some time to process records.
      Thread.sleep(10000);
    } catch (InterruptedException ie) {
      // Ignored: fall through to shutdown.
    }
example.shutdown();
}
}
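
The `main` method above simply sleeps for 10 seconds before shutting down. A
minimal sketch of an alternative (hypothetical, not part of this commit) that
keeps the consumer running until the process is interrupted, using a JVM
shutdown hook in place of the fixed sleep; it reuses the local variables from
`main`:

    // Hypothetical replacement for the tail of main(): run until Ctrl-C.
    final ConsumerGroupExample example =
        new ConsumerGroupExample(zooKeeper, groupId, topic, url);
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        example.shutdown();  // stops the connector and the worker threads
      }
    });
    example.run(threads);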
53 changes: 53 additions & 0 deletions consumer/src/main/java/io/confluent/examples/consumer/ConsumerLogic.java
@@ -0,0 +1,53 @@
/**
* Copyright 2015 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.confluent.examples.consumer;

import org.apache.avro.generic.GenericRecord;

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.message.MessageAndMetadata;

public class ConsumerLogic implements Runnable {
  private final KafkaStream<Object, Object> stream;
  private final int threadNumber;

  public ConsumerLogic(KafkaStream<Object, Object> stream, int threadNumber) {
    this.threadNumber = threadNumber;
    this.stream = stream;
  }

public void run() {
ConsumerIterator<Object, Object> it = stream.iterator();

while (it.hasNext()) {
MessageAndMetadata<Object, Object> record = it.next();

String topic = record.topic();
int partition = record.partition();
long offset = record.offset();
Object key = record.key();
GenericRecord message = (GenericRecord) record.message();
System.out.println("Thread " + threadNumber +
" received: " + "Topic " + topic +
" Partition " + partition +
" Offset " + offset +
" Key " + key +
" Message " + message.toString());
}
System.out.println("Shutting down Thread: " + threadNumber);
}
}
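
`ConsumerLogic` prints each decoded record as a whole. Individual fields can
also be read from the `GenericRecord` by name; a one-line sketch (the field
name "ip" is a placeholder, since the page_visits schema is defined in the
producer project and not shown in this diff):

    // Hypothetical: pull a single field out of the decoded Avro record.
    Object ip = message.get("ip");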
