
Testing PR 4163: Divergent TLS Config for Controller and Segment Store


This page describes how the changes made for Pravega PR 4163 were verified.

Building the Docker Image

Build the image using the ./gradlew docker command, and note the generated image version by inspecting the output of the docker images command.
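For example, the build and version check look like this; the snapshot tag shown below is the one used throughout this page, and your build will produce its own tag:

    $ ./gradlew docker
    $ docker images
    REPOSITORY           TAG                           IMAGE ID   ...
    pravega/pravega      0.6.0-2353.1bb7d64-SNAPSHOT   ...
    pravega/bookkeeper   0.6.0-2353.1bb7d64-SNAPSHOT   ...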

Deploying a Cluster with Controller TLS Off and Segment Store TLS On

Docker Compose Cluster

  1. Prepare a YAML manifest file like the one shown below, using the appropriate Pravega and BookKeeper image version (0.6.0-2353...). The TLS material mounted from /pravega/docker-compose/pki can be generated as shown in the openssl sketch after these steps.

    In this deployment, we enable auth (short for authentication and authorization) for both the Controller and the Segment Store, but enable TLS only for the Segment Store.

version: "3"
services:
zookeeper:
  image: zookeeper:3.5.4-beta
  ports:
    - "2181:2181"

hdfs:
  image: pravega/hdfs:2.7.7
  ports:
    - "2222:2222"
    - "8020:8020"
    - "50090:50090"
    - "50010:50010"
    - "50020:50020"
    - "50075:50075"
    - "50070:50070"
  environment:
    SSH_PORT: 2222
    HDFS_HOST: ${HOST_IP}

bookie1:
  image: pravega/bookkeeper:0.6.0-2353.1bb7d64-SNAPSHOT
  ports:
    - "3181:3181"
  restart: always
  environment:
    ZK_URL: zookeeper:2181
    bookiePort: 3181
  links:
    - zookeeper

bookie2:
  image: pravega/bookkeeper:0.6.0-2353.1bb7d64-SNAPSHOT
  ports:
    - "3182:3182"
  restart: always
  environment:
    ZK_URL: zookeeper:2181
    bookiePort: 3182
  links:
    - zookeeper

bookie3:
  image: pravega/bookkeeper:0.6.0-2353.1bb7d64-SNAPSHOT
  ports:
    - "3183:3183"
  restart: always
  environment:
    ZK_URL: zookeeper:2181
    bookiePort: 3183
  links:
    - zookeeper

controller:
  image: pravega/pravega:0.6.0-2353.1bb7d64-SNAPSHOT
  ports:
    - "9090:9090"
    - "10080:10080"
  command: controller
  environment:
    WAIT_FOR: zookeeper:2181
    ZK_URL: zookeeper:2181
    REST_SERVER_PORT: 10080
    JAVA_OPTS: |
      -Dlog.level="DEBUG"
      -Dcontroller.auth.segmentStoreTlsEnabled="true"
      -Dcontroller.service.port=9090
      -Dcontroller.auth.tlsEnabled=false
      -Dcontroller.auth.tlsCertFile="/pki/server-cert.crt"
      -Dcontroller.auth.tlsTrustStore="/pki/ca-cert.crt"
      -Dcontroller.auth.tlsKeyFile="/pki/server-key.key"
      -Dcontroller.zk.secureConnection=false
      -Dcontroller.zk.tlsTrustStoreFile="/pki/zk.truststore.jks"
      -Dcontroller.zk.tlsTrustStorePasswordFile="/pki/zk.truststore.jks.password"
      -Dcontroller.rest.tlsKeyStoreFile="/pki/server.keystore.jks"
      -Dcontroller.rest.tlsKeyStorePasswordFile="/pki/server.keystore.jks.passwd"
      -Dcontroller.auth.enabled=true
      -Dcontroller.auth.userPasswordFile="/opt/pravega/conf/passwd"
      -Dconfig.controller.metricenableCSVReporter=false
      -Dcontroller.auth.tokenSigningKey=secret
      -Xmx512m
      -XX:OnError="kill -9 p%"
      -XX:+ExitOnOutOfMemoryError
      -XX:+CrashOnOutOfMemoryError
      -XX:+HeapDumpOnOutOfMemoryError
    SERVICE_HOST_IP: segmentstore
  volumes:
    - /pravega/docker-compose/pki:/pki  
  links:
    - zookeeper

segmentstore:
  image: pravega/pravega:0.6.0-2353.1bb7d64-SNAPSHOT
  ports:
    - "12345:12345"
  command: segmentstore
  depends_on: 
    - hdfs
  environment:
    WAIT_FOR: bookie1:3181,bookie2:3182,bookie3:3183,hdfs:8020
    TIER2_STORAGE: "HDFS"
    HDFS_REPLICATION: 1
    HDFS_URL: ${HOST_IP}:8020
    ZK_URL: zookeeper:2181
    CONTROLLER_URL: tcp://${HOST_IP}:9090
    JAVA_OPTS: |
      -Dlog.level="DEBUG"
      -Dpravegaservice.enableTls=true
      -Dpravegaservice.enableTlsReload=false
      -Dpravegaservice.certFile="/pki/server-cert.crt"
      -Dpravegaservice.keyFile="/pki/server-key.key"
      -Dpravegaservice.secureZK=false
      -Dpravegaservice.zkTrustStore="/pki/zk.truststore.jks"
      -Dpravegaservice.zkTrustStorePasswordPath="/pki/zk.truststore.jks.password"
      -DautoScale.tlsEnabled=true
      -DautoScale.tlsCertFile="/pki/server-cert.crt"
      -DautoScale.validateHostName=false
      -DautoScale.authEnabled=true
      -DautoScale.tokenSigningKey=secret
      -Dbookkeeper.tlsEnabled=false
      -Dbookkeeper.tlsTrustStorePath="/pki/bk.truststore.jks"
      -Dmetrics.enableCSVReporter=false
      -Dpravegaservice.publishedIPAddress=${HOST_IP}
      -Dbookkeeper.bkEnsembleSize=2
      -Dbookkeeper.bkAckQuorumSize=2
      -Dbookkeeper.bkWriteQuorumSize=2
      -Dpravega.client.auth.token="YWRtaW46MTExMV9hYWFh"
      -Dpravega.client.auth.method="Basic"
      -Xmx900m
      -XX:OnError="kill -9 p%"
      -XX:+ExitOnOutOfMemoryError
      -XX:+CrashOnOutOfMemoryError
      -XX:+HeapDumpOnOutOfMemoryError
  volumes:
    - /pravega/docker-compose/pki:/pki    
  links:
    - zookeeper
    - hdfs
    - bookie1
    - bookie2
    - bookie3
  2. Create an environment variable pointing to the current host IP address: $ export HOST_IP=<host_IP>

  3. Change directory to the one containing the docker-compose.yml file: $ cd /path/to/docker-compose-manifest-file

  4. Deploy the application specified in the docker-compose.yml file: $ docker-compose up -d

  5. Verify that the application is up and running: the following command should return the response body shown below.

    $ curl -v -u admin:1111_aaaa http://$HOST_IP:10080/v1/scopes 
    

    Expected Output:

    {"scopes":[{"scopeName":"_system"}]}
    

Kubernetes Cluster

The Pravega operator manifest looked like this:

apiVersion: "pravega.pravega.io/v1alpha1"
kind: "PravegaCluster"
metadata:
  name: "pravega"
spec:
  version: <Pravega-Version>
  zookeeperUri: pravega-zookeeper-client:2181
  externalAccess:
    enabled: true
    type: LoadBalancer
  tls:
    static:
      controllerSecret: "controller-tls"
      segmentStoreSecret: "segmentstore-tls"
  bookkeeper:
    replicas: 1
    image:
      repository: pravega/bookkeeper
      pullPolicy: IfNotPresent
    storage:
      ledgerVolumeClaimTemplate:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "default"
        resources:
          requests:
            storage: 10Gi

      journalVolumeClaimTemplate:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "default"
        resources:
          requests:
            storage: 10Gi
    autoRecovery: true
    serviceAccountName: pravega-components

  pravega:
    controllerReplicas: 1
    segmentStoreReplicas: 1
    controllerServiceAccountName: pravega-components
    segmentStoreServiceAccountName: pravega-components
    cacheVolumeClaimTemplate:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "default"
      resources:
        requests:
          storage: 20Gi
    image:
      repository: pravega/pravega
      pullPolicy: IfNotPresent
    tier2:
      filesystem:
        persistentVolumeClaim:
          claimName: pravega-tier2
    options:
        log.level: "DEBUG"
        controller.auth.tlsEnabled: "false"
        controller.auth.segmentStoreTlsEnabled: "true"
        controller.auth.tlsCertFile: "/etc/secret-volume/controllerTlsCertFile"
        controller.auth.tlsTrustStore: "/etc/secret-volume/controllerCacert"
        controller.auth.tlsKeyFile: "/etc/secret-volume/controllerTlsKeyFile"
        controller.zk.secureConnection: "false"
        controller.zk.tlsTrustStoreFile: "empty"
        controller.zk.tlsTrustStorePasswordFile: "empty"
        controller.rest.tlsKeyStoreFile: "/etc/secret-volume/controllerTlsKeyStoreFile"
        controller.rest.tlsKeyStorePasswordFile: "/etc/secret-volume/passwordfile"
        controller.auth.enabled: "true"
        controller.auth.userPasswordFile: "/opt/pravega/conf/passwd"
        controller.auth.tokenSigningKey: "secret"
        pravegaservice.enableTls: "true"
        pravegaservice.enableTlsReload: "false"
        pravegaservice.certFile: "/etc/secret-volume/segmentstoreTlsCertFile"
        pravegaservice.keyFile: "/etc/secret-volume/segmentstoreTlsKeyFile"
        pravegaservice.secureZK: "false"
        pravegaservice.zkTrustStore: "empty"
        pravegaservice.zkTrustStorePasswordPath: "empty"
        autoScale.tlsEnabled: "true"
        autoScale.tlsCertFile: "/etc/secret-volume/segmentstoreTlsCertFile"
        autoScale.validateHostName: "false"
        autoScale.authEnabled: "true"
        autoScale.tokenSigningKey: "secret"
        bookkeeper.tlsEnabled: "false"
        bookkeeper.tlsTrustStorePath: "empty"
        pravega.client.auth.token: "YWRtaW46MTExMV9hYWFh"
        pravega.client.auth.method: "Basic"
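The manifest references two TLS secrets, controller-tls and segmentstore-tls, and the option values above imply that the operator mounts each secret's keys as files under /etc/secret-volume. A minimal sketch for creating the secrets with kubectl, assuming the certificates, keys, and keystore were generated beforehand (the local file names are assumptions; the secret key names match the mount paths used in the options):

    $ kubectl create secret generic controller-tls \
        --from-file=controllerTlsCertFile=server-cert.crt \
        --from-file=controllerTlsKeyFile=server-key.key \
        --from-file=controllerCacert=ca-cert.crt \
        --from-file=controllerTlsKeyStoreFile=server.keystore.jks \
        --from-file=passwordfile=server.keystore.jks.passwd
    $ kubectl create secret generic segmentstore-tls \
        --from-file=segmentstoreTlsCertFile=server-cert.crt \
        --from-file=segmentstoreTlsKeyFile=server-key.key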

Client Writer and Reader Application for Verifying Writes and Reads

import io.pravega.client.ClientConfig;
import io.pravega.client.ClientFactory;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.*;
import io.pravega.client.stream.impl.DefaultCredentials;
import io.pravega.client.stream.impl.JavaSerializer;
import lombok.Cleanup;

import java.net.URI;
import java.util.UUID;
import java.util.concurrent.ExecutionException;

public class MixedModeWriterAndReaderExample {
    public static void main(String... args) throws ExecutionException, InterruptedException {
        String scope = "org.example.auth";
        String streamName = "stream";
        int numSegments = 10;

        ClientConfig clientConfig = ClientConfig.builder()
                // DefaultCredentials takes the password first, then the username.
                .credentials(new DefaultCredentials("1111_aaaa", "admin"))
                // Note the non-secure scheme: Controller TLS is off.
                .controllerURI(URI.create("tcp://<controller-IP>:9090"))
                // The trust store and hostname-validation settings are still needed
                // for communication with the Segment Store, which has TLS on.
                .trustStore("/path/to/segmentstoreca/ca.crt")
                .validateHostName(false)
                // Tells the client to talk to the Segment Store over TLS even though
                // the Controller URI has a non-secure scheme.
                .enableTlsToSegmentStore(true)
                .build();
        System.out.println("Done creating client config");

        @Cleanup
        StreamManager streamManager = StreamManager.create(clientConfig);
        System.out.println("Created a stream manager");

        streamManager.createScope(scope);
        System.out.println("Created a scope: " + scope);

        streamManager.createStream(scope, streamName, StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.fixed(numSegments))
                .build());
        System.out.println("Created stream: " + streamName);

        @Cleanup
        ClientFactory clientFactory = ClientFactory.withScope(scope, clientConfig);

        @Cleanup
        EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new JavaSerializer<String>(),
                EventWriterConfig.builder().build());
        System.out.println("Got a writer");

        writer.writeEvent("Hello-1").get();
        writer.writeEvent("Hello-2").get();
        System.out.println("Wrote data to the stream");

        // Now, read the events from the stream.

        String readerGroup = UUID.randomUUID().toString().replace("-", "");
        ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
                .stream(Stream.of(scope, streamName))
                .disableAutomaticCheckpoints()
                .build();

        @Cleanup
        ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, clientConfig);
        readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);

        @Cleanup
        EventStreamReader<String> reader = clientFactory.createReader(
                "readerId", readerGroup,
                new JavaSerializer<String>(), ReaderConfig.builder().build());

        // Use a read timeout (in milliseconds) generous enough to leave ample time
        // for reading the event, even in case of abnormal delays in test environments.
        String readEvent1 = reader.readNextEvent(1000).getEvent();
        System.out.println(String.format("Done reading event [%s]", readEvent1));

        String readEvent2 = reader.readNextEvent(1000).getEvent();
        System.out.println(String.format("Done reading event [%s]", readEvent2));
    }
}
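When run against the mixed-mode cluster (with the Pravega client library and Lombok on the classpath), the program should print output along these lines; the two events may be read in either order:

    Done creating client config
    Created a stream manager
    Created a scope: org.example.auth
    Created stream: stream
    Got a writer
    Wrote data to the stream
    Done reading event [Hello-1]
    Done reading event [Hello-2]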