diff --git a/xml/app_psps.xml b/xml/app_psps.xml index 7eb9f1ec..160716a1 100644 --- a/xml/app_psps.xml +++ b/xml/app_psps.xml @@ -15,8 +15,8 @@ &productname; 1.3.1 introduces built-in support for Pod Security Policies (PSPs), which are provided via &helm; charts and are set up automatically, - unlike older releases which require manual PSP setup. &suse; &caasp; and - Microsoft AKS both require PSPs for ∩ to operate correctly. This section + unlike older releases which require manual PSP setup. &caasp; and + &aks; both require PSPs for ∩ to operate correctly. This section provides instructions for configuring and applying the appropriate PSPs to older ∩ releases. diff --git a/xml/app_scf_values_yaml.xml b/xml/app_scf_values_yaml.xml index 0a4dd0fd..79ed2951 100644 --- a/xml/app_scf_values_yaml.xml +++ b/xml/app_scf_values_yaml.xml @@ -1044,7 +1044,7 @@ sizing: # SCF patches for upstream bugs # # - cloud_controller_ng: The Cloud Controller provides primary Cloud Foundry - # API that is by the CF CLI. The Cloud Controller uses a database to keep + # API that is by the &cfcli;. The Cloud Controller uses a database to keep # tables for organizations, spaces, apps, services, service instances, user # roles, and more. Typically multiple instances of Cloud Controller are load # balanced. diff --git a/xml/cap_admin_app_autoscaler.xml b/xml/cap_admin_app_autoscaler.xml index 6c723552..3bd6021f 100644 --- a/xml/cap_admin_app_autoscaler.xml +++ b/xml/cap_admin_app_autoscaler.xml @@ -40,7 +40,7 @@ - The Cloud Foundry CLI AutoScaler Plug-in, see + The &cf; CLI AutoScaler Plug-in, see The plugin can be installed by running the following command: diff --git a/xml/cap_admin_backup-restore.xml b/xml/cap_admin_backup-restore.xml index 7f97a84d..92f339ff 100644 --- a/xml/cap_admin_backup-restore.xml +++ b/xml/cap_admin_backup-restore.xml @@ -62,7 +62,7 @@ Buildpacks are not saved. 
Applications using custom buildpacks not - available on the target SUSE Cloud Foundry instance will not be restored. + available on the target &scf; instance will not be restored. Custom buildpacks must be managed separately, and relevant buildpacks must be in place before the affected applications are restored. @@ -673,7 +673,7 @@ defined on the source cluster. This value is defined in /var/vcap/jobs/cloud_controller_ng/config/cloud_controller_ng.yml of the api-group-0 pod and also found in various - tables of the MySQL database. + tables of the &mysql; database. Begin by examining the configuration file for diff --git a/xml/cap_admin_buildpacks.xml b/xml/cap_admin_buildpacks.xml index dcce5a39..e30b26b9 100644 --- a/xml/cap_admin_buildpacks.xml +++ b/xml/cap_admin_buildpacks.xml @@ -360,14 +360,14 @@ another_ruby_buildpack 11 true false ruby-buildpack-v1.7.41.1- Offline buildpacks can be created using the cf-buildpack-packager-docker tool, which is available as a - Docker image. The only - requirement to use this tool is a system with Docker support. + &docker; image. The only + requirement to use this tool is a system with &docker; support. Disclaimer - Some Cloud Foundry buildpacks can reference binaries with proprietary or + Some &cf; buildpacks can reference binaries with proprietary or mutually incompatible open source licenses which cannot be distributed together as offline/cached buildpack archives. 
Operators who wish to package and maintain offline buildpacks will be responsible for any @@ -432,7 +432,7 @@ another_ruby_buildpack 11 true false ruby-buildpack-v1.7.41.1- - Build the latest released SUSE Ruby buildpack for the &sle; 15 stack: + Build the latest released &suse; Ruby buildpack for the &sle; 15 stack: &prompt.user;docker run --interactive --tty --rm -v $PWD:/out splatform/cf-buildpack-packager SUSE ruby "" sle15 diff --git a/xml/cap_admin_ccdb_key_rotation.xml b/xml/cap_admin_ccdb_key_rotation.xml index b45ff8e6..bfaffeb2 100644 --- a/xml/cap_admin_ccdb_key_rotation.xml +++ b/xml/cap_admin_ccdb_key_rotation.xml @@ -159,7 +159,7 @@ MariaDB [(none)]> select name, encrypted_environment_variables, encryption_key_l droplets - May contain Docker registry passwords + May contain &docker; registry passwords @@ -175,7 +175,7 @@ MariaDB [(none)]> select name, encrypted_environment_variables, encryption_key_l packages - May contain Docker registry passwords + May contain &docker; registry passwords @@ -250,7 +250,7 @@ MariaDB [(none)]> select name, encrypted_environment_variables, encryption_key_l cf delete, then cf push the app - (Docker apps with registry password) + (&docker; apps with registry password) diff --git a/xml/cap_admin_credhub.xml b/xml/cap_admin_credhub.xml index 84fd3d20..87f90fff 100644 --- a/xml/cap_admin_credhub.xml +++ b/xml/cap_admin_credhub.xml @@ -81,7 +81,7 @@ On occasion, the credhub pod may fail to start due to database migration failures; this has been spotted intermittently on - Azure Kubernetes Service and to a lesser extent, other public clouds. + &aks-full; and to a lesser extent, other public clouds. 
In these situations, manual intervention is required to track the last completed transaction in credhub_user database and update the flyway schema history table with the record of the last diff --git a/xml/cap_admin_minibroker.xml b/xml/cap_admin_minibroker.xml index 507fc8dc..68abee14 100644 --- a/xml/cap_admin_minibroker.xml +++ b/xml/cap_admin_minibroker.xml @@ -20,7 +20,7 @@ OSBAPI compliant broker created by members of the Microsoft Azure team. It - provides a simple method to provision service brokers on Kubernetes clusters. + provides a simple method to provision service brokers on &kube; clusters. Minibroker Upstream Services @@ -40,7 +40,7 @@ - Minibroker is deployed using a &helm; chart. Ensure your SUSE &helm; chart + Minibroker is deployed using a &helm; chart. Ensure your &suse; &helm; chart repository contains the most recent Minibroker chart: &prompt.user;helm repo update @@ -66,7 +66,7 @@ - MariaDB + &mariadb; 4.3.0 10.1.34 @@ -76,7 +76,7 @@ 4.0.6 - PostgreSQL + &postgresql; 6.2.1 11.5.0 @@ -187,7 +187,7 @@ TIP: Use 'cf marketplace -s SERVICE' to view descriptions of individual plans o - MariaDB + &mariadb; 3306 @@ -195,7 +195,7 @@ TIP: Use 'cf marketplace -s SERVICE' to view descriptions of individual plans o 27017 - PostgreSQL + &postgresql; 5432 @@ -276,9 +276,9 @@ minibroker-minibroker-5865f66bb8-6dxm7 2/2 Running 0 - Database Names for PostgreSQL and MariaDB Instances + Database Names for &postgresql; and &mariadb; Instances - By default, Minibroker creates PostgreSQL and MariaDB server instances + By default, Minibroker creates &postgresql; and &mariadb; server instances without a named database. A named database is required for normal usage with these and will need to be added during the cf create-service step using the -c flag. 
For diff --git a/xml/cap_admin_passwords.xml b/xml/cap_admin_passwords.xml index ae9c4164..0ea034af 100644 --- a/xml/cap_admin_passwords.xml +++ b/xml/cap_admin_passwords.xml @@ -38,13 +38,13 @@ - User logins are created (and removed) with the Cloud Foundry Client, + User logins are created (and removed) with the &cf; Client, &cfcli; - Password Management with the Cloud Foundry Client + Password Management with the &cf; Client The administrator cannot change other users' passwords. Only users may diff --git a/xml/cap_admin_service_broker.xml b/xml/cap_admin_service_broker.xml index 2cc4c8e4..c8d0ef7a 100644 --- a/xml/cap_admin_service_broker.xml +++ b/xml/cap_admin_service_broker.xml @@ -85,15 +85,15 @@ Prerequisites - The following examples demonstrate how to deploy service brokers for MySQL - and PostgreSQL with &helm;, using charts from the &suse; repository. You + The following examples demonstrate how to deploy service brokers for &mysql; + and &postgresql; with &helm;, using charts from the &suse; repository. You must have the following prerequisites: - A working &productname; deployment with &helm; and the Cloud Foundry + A working &productname; deployment with &helm; and the &cf; command line interface (&cfcli;). @@ -107,7 +107,7 @@ - An external MySQL or PostgreSQL installation with account credentials that + An external &mysql; or &postgresql; installation with account credentials that allow creating and deleting databases and users. 
@@ -162,7 +162,7 @@ - Configuring the MySQL Deployment + Configuring the &mysql; Deployment Start by extracting the uaa namespace secrets name, and @@ -241,10 +241,10 @@ kube: password: "" - Deploying the MySQL Chart + Deploying the &mysql; Chart - &productname; includes charts for MySQL and PostgreSQL (see + &productname; includes charts for &mysql; and &postgresql; (see for information on managing your &helm; repository): @@ -252,13 +252,13 @@ kube: &helm-search-suse; - Create a namespace for your MySQL sidecar: + Create a namespace for your &mysql; sidecar: &prompt.user;kubectl create namespace mysql-sidecar - Install the MySQL Helm chart: + Install the &mysql; Helm chart: &prompt.user;helm install suse/cf-usb-sidecar-mysql \ @@ -277,18 +277,17 @@ kube: &prompt.user;watch kubectl get pods --namespace=mysql-sidecar - Confirm that the new service has been added to your SUSE Cloud Applications - Platform installation: + Confirm that the new service has been added to your &productname; installation: &prompt.user;cf marketplace - MySQL Requires <literal>mysql_native_password</literal> + &mysql; Requires <literal>mysql_native_password</literal> - The MySQL sidecar works only with deployments that use + The &mysql; sidecar works only with deployments that use mysql_native_password as their authentication plugin. - This is the default for MySQL versions 8.0.3 and earlier, but later + This is the default for &mysql; versions 8.0.3 and earlier, but later versions must be started with --default-auth=mysql_native_password before any user creation. 
(See @@ -297,7 +296,7 @@ kube: - Create and Bind a MySQL Service + Create and Bind a &mysql; Service To create a new service instance, use the &cf; command line client: @@ -317,10 +316,10 @@ kube: &prompt.user;cf bind-service my_application service_instance_name - Deploying the PostgreSQL Chart + Deploying the &postgresql; Chart - The PostgreSQL configuration is slightly different from the MySQL + The &postgresql; configuration is slightly different from the &mysql; configuration. The database-specific keys are named differently, and it requires the SERVICE_POSTGRESQL_SSLMODE key. @@ -385,7 +384,7 @@ kube: --wait - Then follow the same steps as for the MySQL chart. + Then follow the same steps as for the &mysql; chart. @@ -407,7 +406,7 @@ kube: - Install the CF-USB CLI plugin for the Cloud Foundry CLI from + Install the CF-USB CLI plugin for the &cf; CLI from , for example: @@ -416,7 +415,7 @@ kube: - Configure the Cloud Foundry USB CLI plugin, using the domain you created + Configure the &cf; USB CLI plugin, using the domain you created for your &scf; deployment: &prompt.user;cf usb-target https://usb.example.com diff --git a/xml/cap_depl_admin_notes.xml b/xml/cap_depl_admin_notes.xml index a14fbb8d..08421915 100644 --- a/xml/cap_depl_admin_notes.xml +++ b/xml/cap_depl_admin_notes.xml @@ -51,9 +51,9 @@ The following tables list the minimum DNS requirements to run &productname;, using example.com as the example domain. Your DNS management is - platform-dependent, for example Microsoft AKS assigns IP addresses to your - services, which you will map to A records. Amazon EKS assigns host names, - which you will use to create CNAMEs. &suse; &caasp; provides the flexibility + platform-dependent, for example &aks; assigns IP addresses to your + services, which you will map to A records. &eks; assigns host names, + which you will use to create CNAMEs. &caasp; provides the flexibility to manage your name services in nearly any way you wish. 
The chapters for each platform in this guide provide the relevant DNS instructions. diff --git a/xml/cap_depl_air_gap_registry.xml b/xml/cap_depl_air_gap_registry.xml index 4c4f0459..2c5cfee1 100644 --- a/xml/cap_depl_air_gap_registry.xml +++ b/xml/cap_depl_air_gap_registry.xml @@ -19,8 +19,8 @@ &readmefirst; - ∩, which consists of Docker images, is deployed to a &kube; cluster - through &helm;. These images are hosted on a Docker registry at + ∩, which consists of &docker; images, is deployed to a &kube; cluster + through &helm;. These images are hosted on a &docker; registry at registry.suse.com. In an air gapped environment, registry.suse.com will not be accessible. You will need to create a registry, and populate it will the images used by ∩. @@ -39,15 +39,15 @@ - The Docker Command Line. See + The &docker; Command Line. See for more information. - A Docker registry has been created in your air gapped environment. Refer to - the Docker documentation at + A &docker; registry has been created in your air gapped environment. Refer to + the &docker; documentation at for instructions. diff --git a/xml/cap_depl_aks.xml b/xml/cap_depl_aks.xml index 34c897ff..b78350b1 100644 --- a/xml/cap_depl_aks.xml +++ b/xml/cap_depl_aks.xml @@ -200,7 +200,8 @@ cap-aks Set the Azure location. See - Quotas and region availability for Azure Kubernetes Service (AKS) + Quotas, virtual machine size restrictions, and region availability + in Azure Kubernetes Service (AKS) for supported locations. Run az account list-locations to verify the correct way to spell your location name, for example East US is eastus in your az commands: @@ -375,7 +376,7 @@ kube-system tunnelfront-595565bc78-j8msn 1/1 Running 0 6m Identify and set the cluster resource group, then enable kernel swap accounting. Swap accounting is required by ∩, but it is not the default in AKS nodes. 
The following commands use the az command - to modify the GRUB configuration on each node, and then reboot the virtual + to modify the &grub; configuration on each node, and then reboot the virtual machines. @@ -821,11 +822,11 @@ tcp-router-tcp-router-public LoadBalancer 10.0.132.203 23.96. - Service Broker Installation Fails on AKS Cluster Running Kubernetes 1.11.8 + Service Broker Installation Fails on &aks; Cluster Running &kube; 1.11.8 If installation of open-service-broker-azure is unsuccessful due to Failed to pull image "osbapublicacr.azurecr.io/microsoft/azure-service-broker:v1.6.0, - upgrade your AKS cluster to &kube; 1.11.9. See for + upgrade your &aks; cluster to &kube; 1.11.9. See for instructions. @@ -956,7 +957,7 @@ awk '($2 ~ /basic/) { system("cf enable-service-access " $1 " -p " $2)}' . - Modify the GRUB configuration of each node to enable swap accounting and then + Modify the &grub; configuration of each node to enable swap accounting and then reboot all nodes. RG_NAME and AKS_NAME are the values set earlier in this chapter and can also be obtained through the AKS web dashboard. diff --git a/xml/cap_depl_eirini.xml b/xml/cap_depl_eirini.xml index 38c419d0..0aa5f193 100644 --- a/xml/cap_depl_eirini.xml +++ b/xml/cap_depl_eirini.xml @@ -38,16 +38,14 @@ As of &scf; 2.18.0, cf push with Eirini does not work on - Amazon Elastic Container Service for Kubernetes and Google Kubernetes - Engine (GKE) by default. To get cf push to work with - Amazon Elastic Container Service for Kubernetes and GKE, you need to apply + &eks-full; and &gke-full; by default. To get cf push to work with + &eks-full; and &gke;, you need to apply a workaround of deleting a webhook by doing the following: &prompt.user;kubectl delete mutatingwebhookconfigurations eirini-x-mutating-hook-eirini Deleting the webhook means that the eirini-persi service - would not be available. Note that this workaround is not needed on Azure - Kubernetes Service. + would not be available. 
Note that this workaround is not needed on &aks-full;. @@ -75,7 +73,7 @@ Eirini is not compatible with &kube; environments running CRI-O at this - time. Please use Docker runtime instead for now. + time. Please use &docker; runtime instead for now. @@ -129,16 +127,14 @@ env: As of &scf; 2.18.0, cf push with Eirini does not work on - Amazon Elastic Container Service for Kubernetes and Google Kubernetes - Engine (GKE) by default. To get cf push to work with - Amazon Elastic Container Service for Kubernetes and GKE, you need to apply + &eks-full; and &gke-full; by default. To get cf push to work with + &eks-full; and &gke;, you need to apply a workaround of deleting a webhook by doing the following: &prompt.user;kubectl delete mutatingwebhookconfigurations eirini-x-mutating-hook-eirini Deleting the webhook means that the eirini-persi service - would not be available. Note that this workaround is not needed on Azure - Kubernetes Service. + would not be available. Note that this workaround is not needed on &aks-full;. diff --git a/xml/cap_depl_eks.xml b/xml/cap_depl_eks.xml index 8a397e06..4b901fc7 100644 --- a/xml/cap_depl_eks.xml +++ b/xml/cap_depl_eks.xml @@ -656,7 +656,7 @@ suse https://kubernetes-charts.suse.com/ Install the AWS service broker as documented at . - Skip the installation of the Kubernetes Service Catalog. While installing + Skip the installation of the &kube; Service Catalog. While installing the AWS Service Broker, make sure to update the &helm; chart version (the version as of this writing is 1.0.0-beta.3). For the broker install, pass in a value indicating the Cluster Service Broker @@ -740,7 +740,7 @@ suse https://kubernetes-charts.suse.com/ - Create a service instance. As an example, a custom MySQL instance can be + Create a service instance. 
As an example, a custom &mysql; instance can be created as: &prompt.user;cf create-service rdsmysql custom mysql-instance-name -c '{ diff --git a/xml/cap_depl_gke.xml b/xml/cap_depl_gke.xml index 1db833b4..fa9ce389 100644 --- a/xml/cap_depl_gke.xml +++ b/xml/cap_depl_gke.xml @@ -61,7 +61,7 @@ - Access to a GCP project with the Kubernetes Engine API enabled. If a + Access to a GCP project with the &kube; Engine API enabled. If a project needs to be created, refer to . To enable access to the API, refer to @@ -230,7 +230,7 @@ - Update the kernel command line and GRUB then restart the virtual machines: + Update the kernel command line and &grub; then restart the virtual machines: &prompt.user;echo "$instance_names" | xargs -i{} gcloud compute ssh {} -- "sudo sed --in-place 's/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0\"/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0 cgroup_enable=memory swapaccount=1\"/g' /etc/default/grub.d/50-cloudimg-settings.cfg && sudo update-grub && sudo systemctl reboot -i" @@ -252,7 +252,7 @@ When inside the node, run the command to update the kernel command line and - GRUB, then restart the node. + &grub;, then restart the node. &prompt.user;sudo sed --in-place 's/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0\"/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0 cgroup_enable=memory swapaccount=1\"/g' /etc/default/grub.d/50-cloudimg-settings.cfg && sudo update-grub && sudo systemctl reboot -i @@ -783,7 +783,7 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 The GCP Service Broker requires a database to store information about the - resources it provisions. Any database that adheres to the MySQL protocol + resources it provisions. Any database that adheres to the &mysql; protocol may be used, but it is recommended to use a GCP Cloud SQL instance, as outlined in the following steps. 
@@ -806,13 +806,13 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 - Click Choose MySQL to select MySQL as the database + Click Choose &mysql; to select &mysql; as the database engine. - In the Instance ID field, enter an identifier for MySQL + In the Instance ID field, enter an identifier for &mysql; instance. @@ -848,13 +848,13 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 - Click Create and wait for the MySQL instance to finish + Click Create and wait for the &mysql; instance to finish creating. - After the MySQL instance is finished creating, connect to it using either + After the &mysql; instance is finished creating, connect to it using either the Cloud Shell or the mysql command line client. @@ -865,7 +865,7 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 - Click on the instance ID of the MySQL instance. + Click on the instance ID of the &mysql; instance. @@ -879,7 +879,7 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 After the shell is opened, the gcloud sql connect command is displayed. Press Enter to connect to the - MySQL instance as the root user. + &mysql; instance as the root user. @@ -897,7 +897,7 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 - Click on the instance ID of the MySQL instance. + Click on the instance ID of the &mysql; instance. @@ -926,7 +926,7 @@ tcp-router-tcp-router-public LoadBalancer 10.23.241.17 35.197 - After connecting to the MySQL instance, run the following commands to + After connecting to the &mysql; instance, run the following commands to create an initial user. The service broker will use this user to connect to the service broker database. @@ -1126,14 +1126,14 @@ applications: Enable access to a service. This example enables access to the Google - CloudSQL MySQL service (see + CloudSQL &mysql; service (see ). &prompt.user;cf enable-service-access google-cloudsql-mysql - Create an instance of the Google CloudSQL MySQL service. 
This example uses + Create an instance of the Google CloudSQL &mysql; service. This example uses the mysql-db-f1-micro plan. Use the -c flag to pass optional parameters when provisioning a service. See for the parameters that can be set for each service. @@ -1206,7 +1206,7 @@ applications: - Update the kernel command line and GRUB then restart the virtual machines. + Update the kernel command line and &grub; then restart the virtual machines. &prompt.user;echo "$NEW_VM_NODES" | xargs -i{} gcloud compute ssh {} -- "sudo sed --in-place 's/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0\"/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0 cgroup_enable=memory swapaccount=1\"/g' /etc/default/grub.d/50-cloudimg-settings.cfg && sudo update-grub && sudo systemctl reboot -i" @@ -1226,7 +1226,7 @@ applications: When inside the node, run the command to update the kernel command line - and GRUB, then restart the node. + and &grub;, then restart the node. &prompt.user;sudo sed --in-place 's/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0\"/GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 net.ifnames=0 cgroup_enable=memory swapaccount=1\"/g' /etc/default/grub.d/50-cloudimg-settings.cfg && sudo update-grub && sudo systemctl reboot -i diff --git a/xml/cap_depl_install_caasp.xml b/xml/cap_depl_install_caasp.xml index 01ae86ec..89cd767a 100644 --- a/xml/cap_depl_install_caasp.xml +++ b/xml/cap_depl_install_caasp.xml @@ -9,7 +9,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink"> - Deploying &productname; on &suse; &caasp; + Deploying &productname; on &caasp; yes @@ -35,7 +35,7 @@ You will run most of the commands in this chapter from a remote workstation, rather than directly on any of the &productname; nodes. These are indicated - by the unprivileged user Tux, while root prompts are on a cluster node. + by the unprivileged user &exampleuser;, while root prompts are on a cluster node. 
There are few tasks that need to be performed directly on any of the cluster hosts. @@ -119,10 +119,10 @@ - Install &suse; &caasp; + Install &caasp; - &productname; is supported on &suse; &caasp; 3.x. + &productname; is supported on &caasp; 3.x. &kernel-prereq; @@ -134,10 +134,10 @@ &caasp; 3 and logging into the Velum Web interface, check the box to - install Tiller (&helm;'s server component). + install &tiller; (&helm;'s server component).
- Install Tiller + Install &tiller; @@ -159,9 +159,9 @@ node, is to use AutoYaST; see Installation with AutoYaST. Set up &caasp; with one admin node and - at least three Kubernetes masters and three Kubernetes workers. You also + at least three &kube; masters and three &kube; workers. You also need an Internet connection, as the installer downloads additional - packages, and the &kube; workers will each download ~10 GB of Docker + packages, and the &kube; workers will each download ~10 GB of &docker; images.
@@ -238,7 +238,7 @@ d028551.example.com Ready <none> 4h v1.10.11 Zypper, you will install the &helm; client on your workstation to install the required Kubernetes applications to set up &productname;, and to administer your cluster remotely. &helm; is the &kube; package manager. - The &helm; client goes on your remote administration computer, and Tiller + The &helm; client goes on your remote administration computer, and &tiller; is &helm;'s server, which is installed on your &kube; cluster. @@ -286,7 +286,7 @@ Happy Helming! Pod Security Policy - &suse; &caasp; 3 includes Pod Security Policy (PSP) support. This change + &caasp; 3 includes Pod Security Policy (PSP) support. This change adds two new PSPs to &caasp; 3: @@ -311,7 +311,7 @@ Happy Helming! &productname; &productnumber; includes the necessary PSP configurations in - the &helm; charts to run on &suse; &caasp;, and are set up automatically + the &helm; charts to run on &caasp;, and are set up automatically without requiring manual configuration. See for instructions on applying the necessary PSPs manually on older ∩ releases. @@ -480,7 +480,7 @@ persistentvolume "pvc-c464ed6a-3852-11e8-bd10-90b8d0c59f1c" deleted Load Balancers Production deployments usually include some kind of load balancer. - Running &productname; on &suse; &caasp; provides the flexibility to + Running &productname; on &caasp; provides the flexibility to structure your network nearly any way you need to. This guide does not describe how to set up a load balancer or name services, as these depend on your requirements and existing network architecture. @@ -551,7 +551,7 @@ kube: # The registry the images will be fetched from. # The values below should work for - # a default installation from the SUSE registry. + # a default installation from the &suse; registry. 
registry: hostname: "registry.suse.com" username: "" diff --git a/xml/cap_depl_openstack.xml b/xml/cap_depl_openstack.xml index a57de32f..95c8f35c 100644 --- a/xml/cap_depl_openstack.xml +++ b/xml/cap_depl_openstack.xml @@ -234,7 +234,7 @@ workers = 2 - For the External Kubernetes API FQDN, use the public + For the External &kube; API FQDN, use the public (floating) IP address of the &caasp; master and append the .xip.io domain suffix diff --git a/xml/cap_depl_stratos.xml b/xml/cap_depl_stratos.xml index 3200f1b8..ddb37923 100644 --- a/xml/cap_depl_stratos.xml +++ b/xml/cap_depl_stratos.xml @@ -18,7 +18,7 @@ The Stratos user interface (UI) is a modern web-based management application - for Cloud Foundry. It provides a graphical management console for both + for &cf;. It provides a graphical management console for both developers and system administrators. Install Stratos with &helm; after all of the uaa and scf pods are running. @@ -49,7 +49,7 @@ sed 's/"namespace": "default"/"namespace": "stratos"/' | kubectl create --filename - - You should already have the Stratos charts when you downloaded the SUSE + You should already have the Stratos charts when you downloaded the &suse; charts repository (see ). Search your &helm; repository to verify that you have the suse/console chart: @@ -148,7 +148,7 @@ stratos 1 1 3h On the Register a new Endpoint view, click the - SUSE CaaS Platform button. + &caasp; button. @@ -159,9 +159,9 @@ stratos 1 1 3h - Enter the URL of the API server for your Kubernetes environment in the + Enter the URL of the API server for your &kube; environment in the Endpoint Address field. Run kubectl cluster-info - and use the value of Kubernetes master as the URL. + and use the value of &kube; master as the URL. &prompt.user;kubectl cluster-info @@ -193,7 +193,7 @@ stratos 1 1 3h - In the Stratos UI, go to Kubernetes in the left-hand side + In the Stratos UI, go to &kube; in the left-hand side navigation. 
Information for your &susecaaspreg; environment should now be displayed. @@ -303,7 +303,7 @@ gp2scoped kubernetes.io/aws-ebs 1d On the Register a new Endpoint view, click the - Amazon EKS button. + &eks; button. @@ -314,9 +314,9 @@ gp2scoped kubernetes.io/aws-ebs 1d - Enter the URL of the API server for your Kubernetes environment in the + Enter the URL of the API server for your &kube; environment in the Endpoint Address field. Run kubectl cluster-info - and use the value of Kubernetes master as the URL. + and use the value of &kube; master as the URL. &prompt.user;kubectl cluster-info @@ -361,7 +361,7 @@ gp2scoped kubernetes.io/aws-ebs 1d - In the Stratos UI, go to Kubernetes in the left-hand side + In the Stratos UI, go to &kube; in the left-hand side navigation. Information for your &eks; environment should now be displayed. @@ -443,9 +443,9 @@ gp2scoped kubernetes.io/aws-ebs 1d - Enter the URL of the API server for your Kubernetes environment in the + Enter the URL of the API server for your &kube; environment in the Endpoint Address field. Run kubectl cluster-info - and use the value of Kubernetes master as the URL. + and use the value of &kube; master as the URL. &prompt.user;kubectl cluster-info @@ -477,7 +477,7 @@ gp2scoped kubernetes.io/aws-ebs 1d - In the Stratos UI, go to Kubernetes in the left-hand side + In the Stratos UI, go to &kube; in the left-hand side navigation. Information for your &aks; environment should now be displayed. @@ -548,7 +548,7 @@ gp2scoped kubernetes.io/aws-ebs 1d On the Register a new Endpoint view, click the - Google Kubernetes Engine button. + &gke-full; button. @@ -559,9 +559,9 @@ gp2scoped kubernetes.io/aws-ebs 1d - Enter the URL of the API server for your Kubernetes environment in the + Enter the URL of the API server for your &kube; environment in the Endpoint Address field. Run kubectl cluster-info - and use the value of Kubernetes master as the URL. + and use the value of &kube; master as the URL. 
&prompt.user;kubectl cluster-info @@ -597,7 +597,7 @@ gp2scoped kubernetes.io/aws-ebs 1d - In the Stratos UI, go to Kubernetes in the left-hand side + In the Stratos UI, go to &kube; in the left-hand side navigation. Information for your &gke; environment should now be displayed. @@ -622,8 +622,8 @@ gp2scoped kubernetes.io/aws-ebs 1d Stratos can show metrics data from - Prometheus for both Cloud - Foundry and Kubernetes. + Prometheus for both &cf; + and &kube;. To learn more about Stratos Metrics and its full list of configuration @@ -635,8 +635,8 @@ gp2scoped kubernetes.io/aws-ebs 1d In order to display metrics data with Stratos, you need to deploy the stratos-metrics &helm; chart - this deploys Prometheus - with the necessary exporters that collect data from Cloud Foundry and - Kubernetes. It also wraps Prometheus with an + with the necessary exporters that collect data from &cf; and + &kube;. It also wraps Prometheus with an nginx server to provide authentication. @@ -669,7 +669,7 @@ services: apiEndpoint is the same URL that you used when - registering your Kubernetes environment with Stratos (the Kubernetes API + registering your &kube; environment with Stratos (the &kube; API Server URL) @@ -879,7 +879,7 @@ susecf-metrics-metrics-nginx NodePort 172.28.107.209 10.86.101.115,172.28. - On the Kubernetes views, views such as the Node view should show an + On the &kube; views, views such as the Node view should show an additional Metrics tab with metric information.
diff --git a/xml/cap_intro.xml b/xml/cap_intro.xml index def870f9..d23281b9 100644 --- a/xml/cap_intro.xml +++ b/xml/cap_intro.xml @@ -22,7 +22,7 @@ which runs on &sle; containers. - ∩ is designed to run on any Kubernetes cluster. This guide describes how + ∩ is designed to run on any &kube; cluster. This guide describes how to deploy it on: diff --git a/xml/cap_kube_requirements.xml b/xml/cap_kube_requirements.xml index f2c6f6d6..b22ed6e4 100644 --- a/xml/cap_kube_requirements.xml +++ b/xml/cap_kube_requirements.xml @@ -9,7 +9,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink"> - Running &productname; on non-&suse; &caasp; &kube; Systems + Running &productname; on non-&caasp; &kube; Systems yes @@ -26,7 +26,7 @@ - Kubernetes API version of at least 1.10, but less than 1.16 + &kube; API version of at least 1.10, but less than 1.16 @@ -68,7 +68,7 @@ - Docker must be configured to allow privileged containers + &docker; must be configured to allow privileged containers @@ -91,7 +91,7 @@ - Helm's Tiller has to be installed and active, with Tiller on the &kube; + Helm's &tiller; has to be installed and active, with &tiller; on the &kube; cluster and Helm on your remote administration machine diff --git a/xml/cap_overview.xml b/xml/cap_overview.xml index a92cad11..97fa38f7 100644 --- a/xml/cap_overview.xml +++ b/xml/cap_overview.xml @@ -67,7 +67,7 @@ - Add support for connecting SUSE Containers as a Service Platform V4 + Add support for connecting &caasp; V4 endpoints @@ -88,8 +88,7 @@ See all product manuals for &productname; 1.x at - SUSE - Cloud Application Platform 1. + &productname; 1. @@ -127,18 +126,18 @@ - All Docker images for the &sle; builds are hosted on + All &docker; images for the &sle; builds are hosted on registry.suse.com. These are the commercially-supported images. (Community-supported images for &opensuse; are hosted on - Docker Hub.) Product + &docker; Hub.) 
Product manuals on - SUSE - Doc: SUSE Cloud Application Platform 1 refer to the + &suse; + Doc: &productname; 1 refer to the commercially-supported &sle; version. - ∩ is designed to run on any Kubernetes cluster. This guide describes how + ∩ is designed to run on any &kube; cluster. This guide describes how to deploy it on: @@ -225,8 +224,8 @@ The principle interface and API for deploying applications to &productname; - is &scf;. Most Cloud Foundry distributions run on virtual machines managed - by &bosh;. &scf; runs in SUSE Linux Enterprise containers managed by &kube;. + is &scf;. Most &cf; distributions run on virtual machines managed + by &bosh;. &scf; runs in &sle; containers managed by &kube;. Containerizing the components of the platform itself has these advantages: @@ -248,7 +247,7 @@ - &scf; packages upstream Cloud Foundry &bosh; releases to produce containers + &scf; packages upstream &cf; &bosh; releases to produce containers and configurations which are deployed to &kube; clusters using &helm;. @@ -256,9 +255,8 @@ Minimum Requirements - This guide details the steps for deploying &scf; on &suse; &caasp;, and on - supported &kube; environments such as Microsoft Azure Kubernetes Service - (AKS), and Amazon Elastic Container Service for Kubernetes (EKS). &suse; + This guide details the steps for deploying &scf; on &caasp;, and on + supported &kube; environments such as &aks-full;, &gke-full;, and &eks-full;. &caasp; is a specialized application development and hosting platform built on the &susemicros; container host operating system, container orchestration with &kube;, and &salt; for automating installation and configuration. @@ -268,7 +266,7 @@ Required Knowledge Installing and administering &productname; requires knowledge of Linux, - &docker;, &kube;, and your &kube; platform (for example &suse; &caasp;, + &docker;, &kube;, and your &kube; platform (for example &caasp;, AKS, EKS, OpenStack). 
You must plan resource allocation and network architecture by taking into account the requirements of your &kube; platform in addition to &scf; requirements. &scf; is a discrete component @@ -280,8 +278,8 @@ You may create a minimal deployment on four &kube; nodes for testing. However, this is insufficient for a production deployment. A supported - deployment includes &scf; installed on &suse; &caasp;, Amazon EKS, or Azure - AKS. You also need a storage back-end such as &ses; or NFS, a DNS/DHCP + deployment includes &scf; installed on &caasp;, &eks;, &gke;, or &aks;. + You also need a storage back-end such as &ses; or NFS, a DNS/DHCP server, and an Internet connection to download additional packages during installation and ~10 GB of &docker; images on each &kube; worker after installation. (See .) @@ -289,10 +287,10 @@ A production deployment requires considerable resources. &productname; - includes an entitlement of &suse; &caasp; and &ses;. &ses; alone has + includes an entitlement of &caasp; and &ses;. &ses; alone has substantial requirements; see the Tech - Specs for details. &suse; &caasp; requires a minimum of four hosts: + Specs for details. &caasp; requires a minimum of four hosts: one admin and three &kube; nodes. &scf; is then deployed on the &kube; nodes. Four &caasp; nodes are not sufficient for a production deployment. describes a minimal production @@ -360,7 +358,7 @@ - &suse; &caasp; is a Container as a Service platform, and &productname; is a + &caasp; is a Container as a Service platform, and &productname; is a PaaS. @@ -379,7 +377,7 @@
- illustrates how &suse; &caasp; and + illustrates how &caasp; and &productname; containerize the platform itself. @@ -392,7 +390,7 @@ - &suse; &caasp; and &productname; containerize the + &caasp; and &productname; containerize the platform itself. @@ -436,7 +434,7 @@
- Docker Trusted Registry owned by SUSE. + &docker; Trusted Registry owned by &suse;. @@ -446,7 +444,7 @@ - &helm;, the &kube; package manager, which includes Tiller, the &helm; + &helm;, the &kube; package manager, which includes &tiller;, the &helm; server, and the helm command line client. @@ -624,7 +622,7 @@ mysql - A MariaDB server and component to route requests to replicas. (A + A &mariadb; server and component to route requests to replicas. (A separate copy is deployed for uaa.) @@ -658,7 +656,7 @@ router - Routes application and API traffic. Exposed using a Kubernetes service. + Routes application and API traffic. Exposed using a &kube; service. @@ -758,13 +756,13 @@ External HTTPS Internal &kube; components - Download Docker Images + Download &docker; Images Refer to registry.suse.com Refer to registry.suse.com registry.suse.com - Docker images + &docker; images None - Docker images that make up ∩ are downloaded + &docker; images that make up ∩ are downloaded 3 diff --git a/xml/cap_troubleshooting.xml b/xml/cap_troubleshooting.xml index b6d9229c..d24a0834 100644 --- a/xml/cap_troubleshooting.xml +++ b/xml/cap_troubleshooting.xml @@ -41,13 +41,13 @@ If you ever need to request support, or just want to generate detailed system information and logs, use the supportconfig utility. Run it with no options to collect basic system information, and - also cluster logs including Docker, etcd, flannel, and Velum. + also cluster logs including &docker;, etcd, flannel, and Velum. supportconfig may give you all the information you need.
supportconfig -h prints the options. Read the "Gathering - System Information for Support" chapter in any &sle; Administration Guide to + System Information for Support" chapter in any &sle; &admin; to learn more. diff --git a/xml/common_copyright_gfdl.xml b/xml/common_copyright_gfdl.xml index 3aabb8d0..82e07ba7 100644 --- a/xml/common_copyright_gfdl.xml +++ b/xml/common_copyright_gfdl.xml @@ -23,7 +23,7 @@ Copyright © 2006– - SUSE LLC and contributors. All rights reserved. + &suse; LLC and contributors. All rights reserved. Permission is granted to copy, distribute and/or modify this document under @@ -41,7 +41,7 @@ All information found in this book has been compiled with utmost attention to - detail. However, this does not guarantee complete accuracy. Neither SUSE LLC, + detail. However, this does not guarantee complete accuracy. Neither &suse; LLC, its affiliates, the authors nor the translators shall be held liable for possible errors or the consequences thereof. diff --git a/xml/common_intro_making_i.xml b/xml/common_intro_making_i.xml index 1855703f..303be681 100644 --- a/xml/common_intro_making_i.xml +++ b/xml/common_intro_making_i.xml @@ -20,7 +20,9 @@ - This documentation is written in SUSEDoc, a subset of + This documentation is written in + GeekoDoc, + a subset of DocBook 5. 
diff --git a/xml/entity-decl.ent b/xml/entity-decl.ent index e594b7de..3e477137 100644 --- a/xml/entity-decl.ent +++ b/xml/entity-decl.ent @@ -2,8 +2,8 @@ - - + + @@ -24,7 +24,7 @@ - + @@ -147,10 +147,10 @@ AppArmor®"> sudo"> - + - + @@ -170,11 +170,11 @@ - + - + - + diff --git a/xml/repeated-content-decl.ent b/xml/repeated-content-decl.ent index 24d7ac6e..e2c183b0 100644 --- a/xml/repeated-content-decl.ent +++ b/xml/repeated-content-decl.ent @@ -102,7 +102,7 @@ Before you start deploying &productname;, review the following documents: Read the Release Notes: - Release Notes SUSE Cloud Application Platform + Release Notes &productname; Read @@ -337,7 +337,7 @@ Before you start deploying &productname;, review the following documents: For &slea;, ensure the &productname; Tools Module has been added. Add the module using &yast; or &suseconnect;. -&prompt.user;SUSEConnect --product sle-module-cap-tools/15.1/x86_64 +&prompt.user;&suseconnect; --product sle-module-cap-tools/15.1/x86_64 For other systems, follow the instructions at . @@ -445,15 +445,15 @@ Before you start deploying &productname;, review the following documents: &prompt.user;helm search suse NAME CHART VERSION APP VERSION DESCRIPTION -suse/cf &latestscfchart; &chart-appversion; A Helm chart for SUSE Cloud Foundry -suse/cf-usb-sidecar-mysql 1.0.1 A Helm chart for SUSE Universal Service Broker Sidecar fo... -suse/cf-usb-sidecar-postgres 1.0.1 A Helm chart for SUSE Universal Service Broker Sidecar fo... +suse/cf &latestscfchart; &chart-appversion; A Helm chart for &suse; &cf; +suse/cf-usb-sidecar-mysql 1.0.1 A Helm chart for &suse; Universal Service Broker Sidecar fo... +suse/cf-usb-sidecar-postgres 1.0.1 A Helm chart for &suse; Universal Service Broker Sidecar fo... suse/console &lateststratoschart; &chart-appversion; A Helm chart for deploying Stratos UI Console suse/log-agent-rsyslog 1.0.1 8.39.0 Log Agent for forwarding logs of K8s control pl... 
suse/metrics &metrics-version; &chart-appversion; A Helm chart for Stratos Metrics suse/minibroker &minibroker-version; A minibroker for your minikube suse/nginx-ingress 0.28.4 0.15.0 An nginx Ingress controller that uses ConfigMap to store ... -suse/uaa &latestscfchart; &chart-appversion; A Helm chart for SUSE UAA +suse/uaa &latestscfchart; &chart-appversion; A Helm chart for &suse; UAA '> @@ -1215,7 +1215,7 @@ done Other Features - Refer to the Administration Guide at + Refer to the &admin; at for additional features. diff --git a/xml/sec_cf_usb_url.xml b/xml/sec_cf_usb_url.xml index 918207bb..2a6e11ae 100644 --- a/xml/sec_cf_usb_url.xml +++ b/xml/sec_cf_usb_url.xml @@ -13,7 +13,7 @@ This change is only applicable for upgrades from ∩ 1.2.1 to ∩ 1.3 and upgrades from ∩ 1.3 to ∩ 1.3.1. The URL of the internal cf-usb broker - endpoint has changed. Brokers for PostgreSQL and MySQL that use cf-usb will + endpoint has changed. Brokers for &postgresql; and &mariadb; that use cf-usb will require the following manual fix after upgrading to reconnect with SCF/CAP: