diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index b7353733..00000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Desktop (please complete the following information):** - - OS: [e.g. iOS] - - Browser [e.g. chrome, safari] - - Version [e.g. 22] - -**Smartphone (please complete the following information):** - - Device: [e.g. iPhone6] - - OS: [e.g. iOS8.1] - - Browser [e.g. stock browser, safari] - - Version [e.g. 22] - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/content-request.md b/.github/ISSUE_TEMPLATE/content-request.md deleted file mode 100644 index 18755f57..00000000 --- a/.github/ISSUE_TEMPLATE/content-request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Content request -about: Suggest a change or addition to the content - ---- - -**URL or GitHub link to the page where you're having the issue** -Where in the documentation set can I see the problem? If it's a more general request, then at least identify the product and version. - -**Is your request for a change or addition to content related to a problem? If so, please describe.** -A clear and concise description of the problem. For example: "Whenever I upgrade my software I forget to first collect information about the things I've already installed" - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. 
- -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or content you've considered. - -**Additional context** -Add any other context or screenshots about the content request here. diff --git a/README.md b/README.md index 5eb248e7..3c699102 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,23 @@ # Introduction -This is the docs source for -[Platform Automation Toolkit](https://network.pivotal.io/products/platform-automation), -available from VMware Tanzu Network. +This is the download source for +[Platform Automation Toolkit](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Platform%20Automation%20Toolkit +), +available from the Broadcom Support portal. The production docs are here: -https://docs.pivotal.io/platform-automation/ +https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/platform-automation-toolkit-for-tanzu/5-2/vmware-automation-toolkit/docs-index.html -There is a public staging copy here: -https://docs-pcf-staging.tas.vmware.com/platform-automation/ +There is a staging copy here: +https://author-techdocs2-prod.adobecqms.net/us/en/vmware-tanzu/platform/platform-automation-toolkit-for-tanzu/5-2/vmware-automation-toolkit/docs-index.html # Usage +>**Important**: Mkdocs is no longer used. This doc set is now built in DocWorks. +>Contact your writer on the TAS IX team for help building the docs. +>See https://docworks.vmware.com/one/scene?permalink=uniqueId%3DMarkdown-Project-3084. + + We use [`mkdocs`](https://www.mkdocs.org/) for our documentation engine. To use it locally, it will require `python3` to be installed. 
diff --git a/ci/ci/deployments.yml b/ci/ci/deployments.yml deleted file mode 100644 index 1bff06ea..00000000 --- a/ci/ci/deployments.yml +++ /dev/null @@ -1,41 +0,0 @@ -#@data/values ---- -deployments: - - env_name: ci-aws - paving_dir: aws - opsman_glob: "*aws*.yml" - tags: [ ] - enable_timed_trigger: true - - env_name: ci-azure - paving_dir: azure - opsman_glob: "*azure*.yml" - tags: [ ] - enable_timed_trigger: true -#! - env_name: ci-vsphere -#! paving_dir: nsxt -#! opsman_glob: "*vsphere*.ova" -#! tags: -#! - vsphere-pez -#! enable_timed_trigger: true - - env_name: ci-upgrade - paving_dir: gcp - opsman_glob: "*gcp*.yml" - tags: [ ] - enable_timed_trigger: true - - env_name: ci-support - paving_dir: gcp - opsman_glob: "*gcp*.yml" - tags: [ ] - enable_timed_trigger: true - - env_name: reference-gcp - paving_dir: gcp - opsman_glob: "*gcp*.yml" - tags: [ ] - enable_timed_trigger: false -versions: - - number: v4.4 - regex: 4\.4\..* - vsphere_image: false - - number: v5.0 - regex: 5\.0\..* - vsphere_image: true diff --git a/ci/ci/pipeline.yml b/ci/ci/pipeline.yml deleted file mode 100644 index b3f4f03c..00000000 --- a/ci/ci/pipeline.yml +++ /dev/null @@ -1,1815 +0,0 @@ -#@ load("@ytt:data", "data") -#@ deployments_without_gcp = [x for x in data.values.deployments if x.paving_dir != "gcp"] -groups: -- name: ci - jobs: - - additional-task-testing - - build-binaries-image-combined - - build-ci-image - - build-packages-image - - bump-version-to-major - - check-for-secrets-in-tasks - #@ for/end deployment in data.values.deployments: - - #@ deployment.env_name + "-delete-infrastructure" - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - create-pks-cluster-in-reference-pipeline - - prepare-resource-for-reference-pipeline - - promote-to-final - - reference-gcp-delete-infrastructure - - test-image-dependency-stability - - upgrade-opsman-gcp -- name: bump - jobs: - - bump-test-image-dependency-stability - - 
bump-previous-versions-trigger - #@ for/end version in data.values.versions: - - #@ "update-" + version.number - - empty-cve-patch-notes-file -- name: om - jobs: - #@ for/end bump in ["patch", "minor", "major"]: - - #@ "bump-om-" + bump -- name: vmware-srp-report - jobs: - - build-concourse-buildinfo-image - - send-srp-report - -resources: -- name: docs-platform-automation-reference-pipeline-config - type: git - source: - branch: develop - uri: https://github.com/pivotal/docs-platform-automation-reference-pipeline-config -- name: ci-image - type: registry-image - source: - password: ((docker.password)) - repository: ((docker.ci-repository)) - tag: testing - username: ((docker.username)) -- name: docs-platform-automation - type: git - source: - branch: develop - private_key: ((platform_automation_docs.private_key)) - uri: git@github.com:pivotal/docs-platform-automation - ignore_paths: - - docs -- name: docs-platform-automation-with-docs - type: git - source: - branch: develop - private_key: ((platform_automation_docs.private_key)) - uri: git@github.com:pivotal/docs-platform-automation -- name: pas-windows - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: pas-windows - product_version: 2\.[0-9]+\.[0-9]+$ - sort_by: semver -- name: opsman-image - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: ops-manager - product_version: 2\.[0-9]+\.[0-9]+$ - sort_by: semver -- name: opsman-image-2.9.42 - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: ops-manager - product_version: 2\.9\.42$ -- name: opsman-image-2.9.x - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: ops-manager - product_version: 2\.9\.[0-9]+$ - sort_by: semver -- name: pks-cli - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: pivotal-container-service - product_version: 1\.9\.[0-9]+$ - sort_by: semver -- name: packages-image - type: registry-image - source: - password: 
((docker.password)) - repository: internalpcfplatformautomation/platform-automation - tag: packages - username: ((docker.username)) -- name: packages-image-oci - type: registry-image - source: - password: ((docker.password)) - repository: internalpcfplatformautomation/platform-automation - tag: packages - username: ((docker.username)) -- name: binaries-image - type: registry-image - source: - password: ((docker.password)) - repository: internalpcfplatformautomation/platform-automation - tag: testing - username: ((docker.username)) -- name: vsphere-only-image - type: registry-image - source: - password: ((docker.password)) - repository: internalpcfplatformautomation/platform-automation - tag: vsphere-only - username: ((docker.username)) -- name: pivnet-rc - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: platform-automation - product_version: \d+\.\d+\.\d+ -- name: srp-helper - type: registry-image - source: - repository: harbor.dhaka.cf-app.com/srp/srp-helper-task - username: ((srp-cli-registry-creds.username)) - password: ((srp-cli-registry-creds.password)) -- name: osl - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: open_source_license_Platform_Automation_Toolkit_for_VMware_Tanzu_(.*)_GA.txt - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: odp - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: VMware-Tanzu-platform-automation-toolkit-(.*)-ODP.tar.gz - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-image-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: platform-automation-image-(.*).tgz - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-image-s3-vsphere - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: 
((s3.buckets.release_candidate)) - regexp: vsphere-platform-automation-image-(.*).tar.gz - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-tasks-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: platform-automation-tasks-(.*).zip - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-image-receipt-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: image-receipt-(.*) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-vsphere-image-receipt-s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: vsphere/image-receipt-(.*) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) - type: s3 -- name: osl-validated-image-receipt-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: image-receipt-(.*) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) - version: - path: image-receipt-5.1.0-rc.139 -- name: image-receipt-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: image-receipt-(.*) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: platform-automation-tasks-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.pivnet_products)) - regexp: platform-automation-tasks-(.*).zip - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: platform-automation-image-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.pivnet_products)) - regexp: platform-automation-image-(.*).tgz - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: version - type: 
semver - source: - driver: s3 - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) - key: version - initial_version: 0.0.1-rc.1 -- name: daily - type: time - source: - interval: 24h - location: America/Denver - start: 5:00 AM - stop: 6:00 AM -- name: nightly - type: time - source: - interval: 24h - location: America/Denver - start: 8:00 PM - stop: 9:00 PM -- name: deployments - type: git - source: - branch: main - private_key: ((platform_automation_deployments.private_key)) - uri: git@github.com:pivotal/platform-automation-deployments -- name: paving - type: git - source: - uri: https://github.com/pivotal/paving -- name: govc-cli - type: github-release - source: - access_token: ((om.access_token)) - owner: vmware - repository: govmomi -- name: credhub-cli - type: github-release - source: - access_token: ((om.access_token)) - owner: cloudfoundry-incubator - repository: credhub-cli -- name: bbr-cli - type: github-release - source: - access_token: ((om.access_token)) - owner: cloudfoundry-incubator - repository: bosh-backup-and-restore -- name: bosh-cli - type: github-release - source: - access_token: ((om.access_token)) - owner: cloudfoundry - repository: bosh-cli -- name: winfs-injector - type: github-release - source: - access_token: ((om.access_token)) - owner: pivotal-cf - repository: winfs-injector -- name: isolation-segment-replicator - type: github-release - source: - access_token: ((om.access_token)) - owner: pivotal-cf - repository: replicator -- name: slack - type: slack-notification - source: - url: ((slack_webhook_url)) -- name: os-conf-release - type: bosh-io-release - source: - repository: cloudfoundry/os-conf-release -#@ for version in data.values.versions: -- name: #@ "docs-platform-automation-" + version.number - type: git - source: - branch: #@ version.number - private_key: ((platform_automation_docs.private_key)) - uri: 
git@github.com:pivotal/docs-platform-automation -- name: #@ "platform-automation-" + version.number - type: git - source: - branch: #@ version.number - private_key: ((platform_automation_docs.private_key)) - uri: git@github.com:pivotal/docs-platform-automation -- name: #@ "pivnet-release-" + version.number - type: pivnet - source: - access_key_id: ((pivnet_aws_access_key)) - api_token: ((pivnet_token)) - product_slug: platform-automation - product_version: #@ version.regex - secret_access_key: ((pivnet_aws_secret_key)) -- name: #@ "version-" + version.number - type: semver - source: - driver: s3 - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) - key: #@ "version-" + version.number -#@ end -- name: binaries-table - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: bump-image-cli-versions-(.*).md - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: om - type: git - source: - uri: git@github.com:pivotal-cf/om.git - branch: main - ignore_paths: - - version - - README.md - - docs - private_key: ((om.private_key)) - fetch_tags: false -- name: om-version - type: semver - source: - initial_version: 0.0.0 - driver: git - uri: git@github.com:pivotal-cf/om.git - branch: main - file: version - private_key: ((om.private_key)) -- name: concourse-build-info - type: buildinfo - icon: tag-text-outline - expose_build_created_by: true - source: - metadata: - project: aloha - key2: value2 -- name: build-info-image - type: registry-image - source: - password: ((docker.password)) - repository: ((docker.ci-repository)) - tag: buildinfo - username: ((docker.username)) - -resource_types: -- name: pivnet - type: registry-image - source: - repository: pivotalcf/pivnet-resource - tag: latest-final -- name: slack-notification - type: 
registry-image - source: - repository: cfcommunity/slack-notification-resource - tag: latest -- name: buildinfo - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: buildinfo - -jobs: -- name: build-ci-image - plan: - - get: daily - trigger: true - - get: docs-platform-automation - - task: build-ci-image - privileged: true - file: docs-platform-automation/ci/tasks/build-oci-image.yml - params: - DOCKERFILE: docs-platform-automation/ci/dockerfiles/Dockerfile.ci - - put: ci-image - params: { image: image/image.tar } - get_params: { skip_download: true } - -- name: build-packages-image - plan: - - get: daily - trigger: true - - get: docs-platform-automation - - task: build-packages-image - privileged: true - file: docs-platform-automation/ci/tasks/build-oci-image.yml - params: - DOCKERFILE: docs-platform-automation/ci/dockerfiles/Dockerfile.packages - - put: packages-image - params: { image: image/image.tar } - -- name: build-binaries-image-combined - serial_groups: - - image - - task - plan: - - in_parallel: - - get: osl - - get: govc-cli - trigger: true - params: - globs: - - 'govc_Linux_x86_64*' - - get: bosh-cli - trigger: true - params: - globs: - - '*linux*' - - get: credhub-cli - trigger: true - params: - globs: - - '*linux*' - - get: bbr-cli - trigger: true - params: - globs: - - 'bbr-[^s3]*linux*[^.sha256]' - - get: winfs-injector - trigger: true - params: - globs: - - '*linux' - - get: isolation-segment-replicator - trigger: true - params: - globs: - - '*linux' - - get: om - trigger: true - - get: docs-platform-automation - trigger: false - - get: packages-image - trigger: true - passed: - - build-packages-image - - get: packages-image-oci - params: { format: oci } - - get: version - params: - pre: rc - - in_parallel: - - task: download-product - params: - OM_pivnet_token: ((pivnet_token)) - file: docs-platform-automation/ci/tasks/download-product.yml - - task: test - attempts: 3 - file: 
docs-platform-automation/ci/tasks/test-and-build-om.yml - - task: build-binaries-image - privileged: true - config: - platform: linux - image_resource: - type: registry-image - source: - repository: vito/oci-build-task - params: - DOCKERFILE: docs-platform-automation/ci/dockerfiles/Dockerfile.binaries - IMAGE_ARG_base_image: packages-image-oci/image.tar - inputs: - - name: bbr-cli - - name: bosh-cli - - name: isolation-segment-replicator - - name: om-cli - - name: winfs-injector - - name: govc-cli - - name: credhub-cli - - name: docs-platform-automation - - name: packages-image-oci - outputs: - - name: image - run: - path: build - output_mapping: { image: binaries-image-oci } - - put: binaries-image - params: { image: binaries-image-oci/image.tar } - - task: build-vsphere-image - privileged: true - config: - platform: linux - image_resource: - type: registry-image - source: - repository: vito/oci-build-task - params: - DOCKERFILE: docs-platform-automation/ci/dockerfiles/Dockerfile.vsphere-only - IMAGE_ARG_base_image: binaries-image-oci/image.tar - inputs: - - name: docs-platform-automation - - name: binaries-image-oci - outputs: - - name: image - run: - path: build - - put: vsphere-only-image - params: { image: image/image.tar } - - in_parallel: - - task: define-cli-versions - image: binaries-image - file: docs-platform-automation/ci/tasks/pivnet-release/define-cli-versions.yml - - task: generate-dpkg-list-for-osl - file: docs-platform-automation/ci/tasks/generate-dpkg-list-for-OSL.yml - image: binaries-image - - task: generate-dpkg-list-for-osl - file: docs-platform-automation/ci/tasks/generate-dpkg-list-for-OSL.yml - output_mapping: - rc-image-receipt-s3: rc-vsphere-image-receipt-s3 - image: vsphere-only-image - - task: create-release-file - file: docs-platform-automation/ci/tasks/package-for-release/platform-automation.yml - input_mapping: - platform-automation-image: binaries-image - vsphere-platform-automation-image: vsphere-only-image - - in_parallel: - - 
task: test-docker-import - privileged: true - file: docs-platform-automation/ci/tasks/test-docker-import/task.yml - - task: test-vsphere-docker-import - privileged: true - file: docs-platform-automation/ci/tasks/test-docker-import/task.yml - params: - PRODUCT_PATH: vsphere-platform-automation-image-*.tar.gz - - in_parallel: - limit: 2 - steps: - - put: rc-image-receipt-s3 - params: - file: rc-image-receipt-s3/image-receipt-* - acl: public-read - - put: rc-vsphere-image-receipt-s3 - params: - file: rc-vsphere-image-receipt-s3/image-receipt-* - acl: public-read - - put: rc-image-s3 - params: - file: packaged-product/platform-automation-image-*.tgz - get_params: - skip_download: "true" - - put: rc-image-s3-vsphere - params: - file: packaged-product/vsphere-platform-automation-image-*.tar.gz - get_params: - skip_download: "true" - - put: rc-tasks-s3 - params: - file: packaged-product/platform-automation-tasks-*.zip - get_params: - skip_download: "true" - - put: binaries-table - params: - file: cli-versions-table/bump-image-cli-versions* - acl: public-read - - put: version - params: - file: version/version -- name: test-image-dependency-stability - plan: - - in_parallel: - - get: docs-platform-automation - - get: binaries-image - resource: rc-image-s3 - passed: [ build-binaries-image-combined ] - trigger: true - params: - unpack: true - - get: osl-validated-image-receipt-s3 - - task: generate-stub-version - config: - image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing - platform: linux - outputs: - - name: version - run: - path: bash - args: - - -c - - | - set -eux - echo "test" > version/version - - task: generate-dpkg-list-for-osl - file: docs-platform-automation/ci/tasks/generate-dpkg-list-for-OSL.yml - image: binaries-image - - task: check-osl-reuse-validity - file: docs-platform-automation/ci/tasks/check-osl-reuse-validity.yml -- name: bump-version-to-major - plan: - - put: version - params: - bump: major -- name: 
promote-to-final - plan: - - in_parallel: - - get: osl - - get: odp - - get: docs-platform-automation - - get: rc-image-s3 - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - additional-task-testing - params: - unpack: true - - get: rc-image-s3-vsphere - params: - unpack: true - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - additional-task-testing - - get: platform-automation-tasks - resource: rc-tasks-s3 - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - get: om - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - get: version - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - params: - bump: final - - task: create-release-file - file: docs-platform-automation/ci/tasks/package-for-release/platform-automation.yml - input_mapping: - platform-automation-image: rc-image-s3 - vsphere-platform-automation-image: rc-image-s3-vsphere - - task: test-docker-import - privileged: true - file: docs-platform-automation/ci/tasks/test-docker-import/task.yml - - task: generate-platform-automation-metadata - file: docs-platform-automation/ci/tasks/pivnet-release/generate-platform-automation-metadata-v5.1.yml - - task: generate-dpkg-list-for-osl - file: docs-platform-automation/ci/tasks/generate-dpkg-list-for-OSL.yml - image: rc-image-s3 - - put: pivnet-rc - params: - file_glob: packaged-product/* - metadata_file: metadata/metadata.yml - s3_filepath_prefix: ((pivnet_s3_filepath_prefix)) - - put: docs-platform-automation - params: - repository: docs-platform-automation - tag: version/version - tag_prefix: v - - put: version - params: - 
bump: minor - - put: image-receipt-s3 - params: - file: rc-image-receipt-s3/image-receipt-* - acl: public-read -- name: prepare-resource-for-reference-pipeline - serial_groups: - - image - - task - plan: - - in_parallel: - - get: rc-image-s3 - passed: - - build-binaries-image-combined - trigger: true - - get: rc-tasks-s3 - passed: - - build-binaries-image-combined - trigger: true - - in_parallel: - - put: platform-automation-image-s3 - params: - file: rc-image-s3/platform-automation-image-*.tgz - get_params: - skip_download: "true" - - put: platform-automation-tasks-s3 - params: - file: rc-tasks-s3/platform-automation-tasks-*.zip - get_params: - skip_download: "true" -- name: upgrade-opsman-gcp - serial: true - serial_groups: [ ci-upgrade ] - plan: - - in_parallel: - - get: om - passed: - - build-binaries-image-combined - - get: version - passed: - - build-binaries-image-combined - - get: platform-automation-tasks - resource: rc-tasks-s3 - passed: - - build-binaries-image-combined - params: - unpack: true - - get: binaries-table - passed: - - build-binaries-image-combined - - get: docs-platform-automation - - get: paving - - get: deployments - - get: platform-automation-image - resource: rc-image-s3 - passed: - - build-binaries-image-combined - trigger: true - params: - unpack: true - - get: rc-image-s3-vsphere - passed: - - build-binaries-image-combined - - get: opsman-image-2.9.42 - params: - globs: - - '*gcp*.yml' - - get: opsman-image-2.9.x - params: - globs: - - '*gcp*.yml' - - get: opsman-image - trigger: true - params: - globs: - - '*gcp*.yml' - - task: run-terraform - attempts: 2 - file: docs-platform-automation/ci/tasks/create-infrastructure/task.yml - params: - IAAS: gcp - DEPLOYMENT_NAME: ci-upgrade - OM_PASSWORD: ((opsman-login.password)) - OM_USERNAME: ((opsman-login.username)) - PLATFORM_AUTOMATION_EMAIL: ((platform-automation-email)) - ensure: - put: deployments - params: - rebase: true - repository: deployments - - task: prepare-tasks-with-secrets - 
image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - config: paving - tasks: platform-automation-tasks - vars: deployments - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config/ci/configuration/gcp/director.yml config/ci/configuration/auth.yml - VARS_PATHS: vars/ci-upgrade/terraform-vars.yml - - in_parallel: - - task: create-vm - file: platform-automation-tasks/tasks/create-vm.yml - input_mapping: - image: opsman-image-2.9.42 - state: deployments - config: paving - vars: deployments - params: - STATE_FILE: ci-upgrade/state.yml - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-upgrade/terraform-vars.yml - image: platform-automation-image - ensure: - do: - - task: state-file - file: platform-automation-tasks/tasks/make-git-commit.yml - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - COMMIT_MESSAGE: "create-vm update state file for ci-upgrade" - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: ci-upgrade/state.yml - input_mapping: - repository: deployments - file-source: generated-state - image: platform-automation-image - - put: deployments - params: - rebase: true - repository: repository-commit - - task: configure-authentication - file: platform-automation-tasks/tasks/configure-authentication.yml - image: platform-automation-image - attempts: 20 - input_mapping: - env: deployments - config: paving - params: - ENV_FILE: ci-upgrade/env.yml - AUTH_CONFIG_FILE: ci/configuration/auth.yml - VARS_FILES: env/ci-upgrade/terraform-vars.yml - - task: configure-director - file: platform-automation-tasks/tasks/configure-director.yml - image: platform-automation-image - input_mapping: - env: deployments - config: paving - params: - ENV_FILE: ci-upgrade/env.yml - VARS_FILES: env/ci-upgrade/terraform-vars.yml - DIRECTOR_CONFIG_FILE: ci/configuration/gcp/director.yml - - task: 
apply-director-changes - file: platform-automation-tasks/tasks/apply-director-changes.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: stage-configure-apply-telemetry - file: platform-automation-tasks/tasks/stage-configure-apply.yml - image: platform-automation-image - attempts: 3 - input_mapping: - env: deployments - config: deployments - vars: deployments - params: - ENV_FILE: ci-upgrade/env.yml - CONFIG_FILE: ci-upgrade/p-telemetry.yml - STAGE_PRODUCT_CONFIG_FILE: ci-upgrade/p-telemetry.yml - VARS_FILE: env/ci-upgrade/terraform-vars.yml - - task: staged-director-config - file: platform-automation-tasks/tasks/staged-director-config.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: export-installation - file: platform-automation-tasks/tasks/export-installation.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: upgrade-opsman-patch - file: platform-automation-tasks/tasks/upgrade-opsman.yml - input_mapping: - image: opsman-image-2.9.x - state: deployments - config: paving - vars: deployments - env: deployments - params: - STATE_FILE: ci-upgrade/state.yml - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-upgrade/terraform-vars.yml - ENV_FILE: ci-upgrade/env.yml - image: platform-automation-image - - task: apply-director-changes - file: platform-automation-tasks/tasks/apply-director-changes.yml - image: platform-automation-image - attempts: 5 - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: export-installation - file: platform-automation-tasks/tasks/export-installation.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: upgrade-opsman-minor - file: platform-automation-tasks/tasks/upgrade-opsman.yml - 
image: platform-automation-image - input_mapping: - image: opsman-image - state: deployments - config: paving - vars: deployments - env: deployments - params: - STATE_FILE: ci-upgrade/state.yml - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-upgrade/terraform-vars.yml - ENV_FILE: ci-upgrade/env.yml - - task: apply-director-changes - file: platform-automation-tasks/tasks/apply-director-changes.yml - image: platform-automation-image - attempts: 5 - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: upgrade-opsman - file: platform-automation-tasks/tasks/upgrade-opsman.yml - image: platform-automation-image - input_mapping: - image: opsman-image - state: deployments - config: paving - vars: deployments - env: deployments - params: - STATE_FILE: ci-upgrade/state.yml - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-upgrade/terraform-vars.yml - ENV_FILE: ci-upgrade/env.yml - - task: apply-director-changes - file: platform-automation-tasks/tasks/apply-director-changes.yml - image: platform-automation-image - attempts: 5 - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: delete-installation - file: platform-automation-tasks/tasks/delete-installation.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-upgrade/env.yml - - task: delete-gcp-opsman-vm - file: platform-automation-tasks/tasks/delete-vm.yml - image: platform-automation-image - input_mapping: - state: generated-state - config: paving - vars: deployments - params: - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-upgrade/terraform-vars.yml - ensure: - do: - - task: state-file - file: platform-automation-tasks/tasks/make-git-commit.yml - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - COMMIT_MESSAGE: "delete-vm update state file for ci-upgrade" - 
FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: ci-upgrade/state.yml - input_mapping: - repository: deployments - file-source: generated-state - image: platform-automation-image - - put: deployments - params: - rebase: true - repository: repository-commit - - -#@ for deployment in deployments_without_gcp: -#@ env_name = deployment.env_name -#@ opsman_glob = deployment.opsman_glob -#@ tags = deployment.tags -#@ paving_dir = deployment.paving_dir -- name: #@ "run-tasks-in-" + env_name + "-job" - serial: true - serial_groups: - - #@ env_name - plan: - - in_parallel: - - get: om - passed: - - build-binaries-image-combined - tags: #@ tags - - get: version - passed: - - build-binaries-image-combined - tags: #@ tags - - get: platform-automation-tasks - resource: rc-tasks-s3 - passed: - - build-binaries-image-combined - params: - unpack: true - tags: #@ tags - - get: binaries-table - passed: - - build-binaries-image-combined - tags: #@ tags - - get: docs-platform-automation - tags: #@ tags - - get: paving - tags: #@ tags - - get: deployments - tags: #@ tags -#@ if deployment.paving_dir == "nsxt": - - get: platform-automation-image - passed: [ build-binaries-image-combined ] - resource: rc-image-s3-vsphere - tags: #@ tags - params: - unpack: true - - get: rc-image-s3 - passed: [ build-binaries-image-combined ] - trigger: true - params: - unpack: true - tags: #@ tags -#@ else: - - get: platform-automation-image - passed: [ build-binaries-image-combined ] - resource: rc-image-s3 - tags: #@ tags - params: - unpack: true - - get: rc-image-s3-vsphere - passed: [ build-binaries-image-combined ] - trigger: true - params: - unpack: true - tags: #@ tags -#@ end - - get: opsman-image - trigger: true - params: - globs: - - #@ opsman_glob - tags: #@ tags - - task: run-terraform - attempts: 2 - tags: #@ tags - file: docs-platform-automation/ci/tasks/create-infrastructure/task.yml - params: - IAAS: #@ paving_dir - DEPLOYMENT_NAME: #@ env_name - OM_PASSWORD: ((opsman-login.password)) 
- OM_USERNAME: ((opsman-login.username)) - PLATFORM_AUTOMATION_EMAIL: ((platform-automation-email)) - ensure: - put: deployments - tags: #@ tags - params: - rebase: true - repository: deployments - - task: prepare-tasks-with-secrets - tags: #@ tags - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - config: paving - tasks: platform-automation-tasks - vars: deployments - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: #@ "config/ci/configuration/" + paving_dir + "/director.yml config/ci/configuration/auth.yml" - VARS_PATHS: #@ "vars/" + env_name + "/terraform-vars.yml" - - task: create-vm - tags: #@ tags - file: platform-automation-tasks/tasks/create-vm.yml - attempts: 3 - input_mapping: - image: opsman-image - state: deployments - config: paving - vars: deployments - params: - STATE_FILE: #@ env_name + "/state.yml" - OPSMAN_CONFIG_FILE: #@ "ci/configuration/" + paving_dir + "/ops-manager.yml" - VARS_FILES: #@ "vars/" + env_name + "/terraform-vars.yml" - GOTRACEBACK: all - image: platform-automation-image - ensure: - do: - - task: state-file - tags: #@ tags - file: platform-automation-tasks/tasks/make-git-commit.yml - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - COMMIT_MESSAGE: #@ "create-vm update state file for " + env_name - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: #@ env_name + "/state.yml" - input_mapping: - repository: deployments - file-source: generated-state - image: platform-automation-image - - put: deployments - tags: #@ tags - params: - rebase: true - repository: repository-commit - - task: configure-authentication - tags: #@ tags - file: platform-automation-tasks/tasks/configure-authentication.yml - image: platform-automation-image - attempts: 20 - input_mapping: - env: deployments - config: paving - params: - ENV_FILE: #@ env_name + "/env.yml" - AUTH_CONFIG_FILE: 
ci/configuration/auth.yml - VARS_FILES: #@ "env/" + env_name + "/terraform-vars.yml" - - task: staged-director-config - tags: #@ tags - file: platform-automation-tasks/tasks/staged-director-config.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: #@ env_name + "/env.yml" - - task: configure-director - tags: #@ tags - file: platform-automation-tasks/tasks/configure-director.yml - image: platform-automation-image - input_mapping: - env: deployments - config: paving - params: - ENV_FILE: #@ env_name + "/env.yml" - VARS_FILES: #@ "env/" + env_name + "/terraform-vars.yml" - DIRECTOR_CONFIG_FILE: #@ "ci/configuration/" + paving_dir + "/director.yml" - - task: apply-director-changes - tags: #@ tags - file: platform-automation-tasks/tasks/apply-director-changes.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: #@ env_name + "/env.yml" - - task: staged-director-config - tags: #@ tags - file: platform-automation-tasks/tasks/staged-director-config.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: #@ env_name + "/env.yml" - - task: delete-installation - tags: #@ tags - file: platform-automation-tasks/tasks/delete-installation.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: #@ env_name + "/env.yml" - - task: #@ "delete-" + env_name + "-opsman-vm" - tags: #@ tags - file: platform-automation-tasks/tasks/delete-vm.yml - input_mapping: - state: generated-state - config: paving - vars: deployments - params: - OPSMAN_CONFIG_FILE: #@ "ci/configuration/" + paving_dir + "/ops-manager.yml" - VARS_FILES: #@ "vars/" + env_name + "/terraform-vars.yml" - image: platform-automation-image - ensure: - do: - - task: state-file - tags: #@ tags - file: platform-automation-tasks/tasks/make-git-commit.yml - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - 
COMMIT_MESSAGE: #@ "delete-vm update state file for " + env_name - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: #@ env_name + "/state.yml" - input_mapping: - repository: deployments - file-source: generated-state - image: platform-automation-image - - put: deployments - tags: #@ tags - params: - rebase: true - repository: repository-commit -#@ end - -#@ for deployment in data.values.deployments: -#@ env_name = deployment.env_name -#@ opsman_glob = deployment.opsman_glob -#@ tags = deployment.tags -#@ paving_dir = deployment.paving_dir -- name: #@ env_name + "-delete-infrastructure" - serial_groups: - - #@ env_name - plan: - - in_parallel: -#@ if deployment.enable_timed_trigger: - - get: nightly - trigger: true - tags: #@ tags -#@ end - - get: docs-platform-automation - tags: #@ tags - - get: paving - tags: #@ tags - - get: deployments - tags: #@ tags - - get: platform-automation-image - resource: rc-image-s3 - params: - unpack: true - tags: #@ tags - - get: platform-automation-tasks - resource: rc-tasks-s3 - params: - unpack: true - tags: #@ tags - - task: prepare-tasks-with-secrets - tags: #@ tags - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - config: paving - tasks: platform-automation-tasks - vars: deployments - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: #@ "config/ci/configuration/" + paving_dir + "/director.yml config/ci/configuration/" + paving_dir + "/ops-manager.yml config/ci/configuration/auth.yml" - VARS_PATHS: #@ "vars/" + env_name + "/terraform-vars.yml" - - try: - task: delete-installation - tags: #@ tags - file: platform-automation-tasks/tasks/delete-installation.yml - input_mapping: - env: deployments - params: - ENV_FILE: #@ env_name + "/env.yml" - image: platform-automation-image - - try: - task: delete-opsman-vm - tags: #@ tags - file: platform-automation-tasks/tasks/delete-vm.yml - image: platform-automation-image - 
input_mapping: - image: opsman-image - state: deployments - config: paving - vars: deployments - params: - STATE_FILE: #@ env_name + "/state.yml" - OPSMAN_CONFIG_FILE: #@ "ci/configuration/" + paving_dir + "/ops-manager.yml" - VARS_FILES: #@ "vars/" + env_name + "/terraform-vars.yml" - ensure: - do: - - task: state-file - tags: #@ tags - file: platform-automation-tasks/tasks/make-git-commit.yml - image: platform-automation-image - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - COMMIT_MESSAGE: #@ "delete-vm update state file for " + env_name - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: #@ env_name + "/state.yml" - input_mapping: - repository: deployments - file-source: generated-state - - put: deployments - tags: #@ tags - params: - rebase: true - repository: repository-commit - - try: - task: delete-infrastructure - tags: #@ tags - file: docs-platform-automation/ci/tasks/delete-infrastructure/task.yml - params: - IAAS: #@ paving_dir - DEPLOYMENT_NAME: #@ env_name - PLATFORM_AUTOMATION_EMAIL: ((platform-automation-email)) - ensure: - put: deployments - tags: #@ tags - params: - rebase: true - repository: deployments - - task: leftovers - tags: #@ tags - file: docs-platform-automation/ci/tasks/leftovers.yml - params: - BBL_IAAS: #@ paving_dir - BBL_AWS_ACCESS_KEY_ID: ((s3.access_key_id)) - BBL_AWS_SECRET_ACCESS_KEY: ((s3.secret_access_key)) - BBL_AWS_REGION: ((s3.region_name)) - BBL_AZURE_CLIENT_ID: ((azure.client_id)) - BBL_AZURE_CLIENT_SECRET: ((azure.client_secret)) - BBL_AZURE_TENANT_ID: ((azure.tenant_id)) - BBL_AZURE_SUBSCRIPTION_ID: ((azure.subscription_id)) - BBL_GCP_SERVICE_ACCOUNT_KEY: ((gcp.service_account)) - BBL_NSXT_PASSWORD: ((nsx.password)) - BBL_NSXT_USERNAME: ((nsx.username)) - BBL_NSXT_MANAGER_HOST: ((nsx.url)) - FILTER: #@ env_name - DRY_RUN: false - NO_CONFIRM: true - - task: remove-state-files - tags: #@ tags - file: docs-platform-automation/ci/tasks/delete-state-file.yml - 
params: - DEPLOYMENT: #@ env_name - PLATFORM_AUTOMATION_EMAIL: ((platform-automation-email)) - ensure: - put: deployments - tags: #@ tags - params: - rebase: true - repository: deployments -#@ end -- name: check-for-secrets-in-tasks - plan: - - in_parallel: - - get: daily - trigger: true - - get: deployments - - get: docs-platform-automation - - task: check-for-secrets - file: docs-platform-automation/ci/tasks/secrets-verifier/task.yml - params: - FLY_USERNAME: ((fly.username)) - FLY_PASSWORD: ((fly.password)) - FLY_TARGET: ((fly.target)) - SECRET_ALLOWLIST: ((secret-allowlist)) - on_failure: - do: - - task: create-secret-slack-notification - file: docs-platform-automation/ci/tasks/create-secret-slack-notification.yml - - put: slack - params: - silent: true - text_file: notification-text/text -- name: create-pks-cluster-in-reference-pipeline - serial: true - plan: - - in_parallel: - - get: docs-platform-automation - - get: deployments - - get: pks-cli - params: - globs: - - 'tkgi-linux-amd64*' - - get: rc-image-s3 - passed: - - build-binaries-image-combined - - get: rc-image-s3-vsphere - passed: - - build-binaries-image-combined - - get: rc-tasks-s3 - passed: - - build-binaries-image-combined - trigger: true - - task: create-pks-cluster - file: docs-platform-automation/ci/tasks/create-pks-cluster/task.yml - params: - ENV_FILE: deployments/reference-gcp/env.yml -- name: additional-task-testing - serial: true - plan: - - in_parallel: - - get: docs-platform-automation - - get: os-conf-release - version: - version: 22.0.0 - - get: docs-platform-automation-reference-pipeline-config - - get: rc-image-s3-vsphere - passed: - - create-pks-cluster-in-reference-pipeline - - get: platform-automation-image - resource: rc-image-s3 - params: - unpack: true - passed: - - create-pks-cluster-in-reference-pipeline - trigger: true - - get: platform-automation-tasks - resource: rc-tasks-s3 - params: - unpack: true - passed: - - create-pks-cluster-in-reference-pipeline - trigger: true - 
- get: om - passed: - - build-binaries-image-combined - - get: binaries-table - passed: - - build-binaries-image-combined - - get: pas-windows - params: - globs: - - '*.pivotal' - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - input_mapping: - files: docs-platform-automation-reference-pipeline-config - output_mapping: - interpolated-files: env - params: - CREDHUB_CLIENT: ((credhub-client)) - CREDHUB_SECRET: ((credhub-secret)) - CREDHUB_SERVER: ((credhub-server)) - CREDHUB_CA_CERT: ((credhub-ca-cert)) - PREFIX: /concourse/main/reference-pipeline - INTERPOLATION_PATHS: . - - in_parallel: - - do: - - task: ensure-clis-versions-run - image: platform-automation-image - config: - platform: linux - run: - path: bash - args: - - -c - - | - set -eux - - # IAASes - openstack --version - govc version - az --version - gcloud --version - aws --version - - # vmware owned - bosh --version - credhub --version - winfs-injector --help - om --version - - do: - - task: replicate-product - image: platform-automation-image - file: platform-automation-tasks/tasks/replicate-product.yml - input_mapping: - product: pas-windows - params: - REPLICATED_NAME: awesome - - task: check-replicated-tile - image: platform-automation-image - config: - platform: linux - inputs: - - name: replicated-product - run: - path: bash - args: - - -c - - | - set -eux - - test -e replicated-product/awesome.pivotal - unzip -t replicated-product/awesome.pivotal - - - do: - - task: get-ca-cert - image: platform-automation-image - config: - platform: linux - outputs: - - name: config - run: - path: bash - args: - - -c - - | - set -eux - - echo quit | openssl s_client -showcerts \ - -servername self-signed.badssl.com \ - -connect self-signed.badssl.com:443 > config/cacert.pem - - task: prepare-image - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-image.yml - params: - CA_CERT_FILES: cacert.pem - - task: 
test-ca-cert-worked - image: platform-automation-image - config: - platform: linux - run: - path: bash - args: - - -c - - | - set -eux - - curl -vvv -L https://self-signed.badssl.com - - do: - - task: backup-tkgi - image: platform-automation-image - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - ENV_FILE: foundations/config/env.yml - OPSMAN_SSH_PRIVATE_KEY: ((vsphere_private_ssh_key)) - - task: check-backup-tkgi - image: platform-automation-image - config: - platform: linux - inputs: - - name: backup - run: - path: bash - args: - - -c - - | - set -eux - - test -e backup/pivotal-container-service*_clusters_*.tgz - tar -tzf backup/pivotal-container-service*_clusters_*.tgz > /dev/null - - do: - - task: download-and-upload-product - image: platform-automation-image - file: platform-automation-tasks/tasks/download-and-upload-product.yml - input_mapping: - config: env - params: - ENV_FILE: foundations/config/env.yml - CONFIG_FILE: download-product-pivnet/download-healthwatch.yml - - do: - - task: backup-director - image: platform-automation-image - file: platform-automation-tasks/tasks/backup-director.yml - params: - ENV_FILE: foundations/config/env.yml - OPSMAN_SSH_PRIVATE_KEY: ((vsphere_private_ssh_key)) - - task: check-backup-director - image: platform-automation-image - config: - platform: linux - inputs: - - name: backup - run: - path: bash - args: - - -c - - | - set -eux - - test -e backup/director_*.tgz - tar -tzf backup/director_*.tgz > /dev/null - - do: - - task: update-runtime-config - image: platform-automation-image - file: platform-automation-tasks/tasks/update-runtime-config.yml - input_mapping: - config: docs-platform-automation - releases: os-conf-release - params: - ENV_FILE: foundations/config/env.yml - CONFIG_FILE: ci/config/runtime-config.yml - NAME: my-runtime-config - OPSMAN_SSH_PRIVATE_KEY: ((vsphere_private_ssh_key)) - -#! 
bump jobs -- name: bump-previous-versions-trigger - serial: true - plan: - - in_parallel: - - get: rc-image-s3-vsphere - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - additional-task-testing - - get: binaries-image - resource: rc-image-s3 - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - additional-task-testing - params: - unpack: true - - get: binaries-table - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - additional-task-testing -- name: bump-test-image-dependency-stability - plan: - - in_parallel: - - get: docs-platform-automation - - get: binaries-image - resource: rc-image-s3 - passed: [ bump-previous-versions-trigger ] - trigger: true - params: - unpack: true - - get: osl-validated-image-receipt-s3 - - task: generate-stub-version - config: - image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing - platform: linux - outputs: - - name: version - run: - path: bash - args: - - -c - - | - set -eux - echo "test" > version/version - - task: generate-dpkg-list-for-osl - file: docs-platform-automation/ci/tasks/generate-dpkg-list-for-OSL.yml - image: binaries-image - - task: check-osl-reuse-validity - file: docs-platform-automation/ci/tasks/check-osl-reuse-validity.yml -#@ for version in data.values.versions: -- name: #@ "update-" + version.number - serial_groups: [ bump ] - plan: - - in_parallel: - - get: osl - - get: odp - - get: #@ "platform-automation-" + version.number - - get: #@ "docs-platform-automation-" + version.number - - get: docs-platform-automation-with-docs - - get: version - resource: #@ "version-" + version.number - params: - bump: patch - - get: rc-image-s3-vsphere - passed: [ bump-previous-versions-trigger ] - params: - 
unpack: true - - get: binaries-image - resource: rc-image-s3 - passed: [ bump-previous-versions-trigger ] - trigger: true - params: - unpack: true - - get: binaries-table - passed: [ bump-previous-versions-trigger ] - - task: create-release-notes-for-patch - file: docs-platform-automation-with-docs/ci/tasks/pivnet-release/generate-release-notes.yml - params: - GITHUB_SSH_KEY: ((platform_automation_docs.private_key)) - - task: create-release-file - file: docs-platform-automation-with-docs/ci/tasks/package-for-release/platform-automation.yml - input_mapping: - docs-platform-automation: docs-platform-automation-with-docs - platform-automation: #@ "platform-automation-" + version.number - platform-automation-image: binaries-image - #@ if version.vsphere_image: - vsphere-platform-automation-image: rc-image-s3-vsphere - #@ end - - in_parallel: - - task: test-docker-import - privileged: true - file: docs-platform-automation-with-docs/ci/tasks/test-docker-import/task.yml - - task: test-task - file: #@ "platform-automation-" + version.number + "/tasks/test.yml" - image: platform-automation-image - input_mapping: - platform-automation-tasks: #@ "platform-automation-" + version.number - - task: generate-platform-automation-metadata - file: docs-platform-automation-with-docs/ci/tasks/pivnet-release/generate-platform-automation-metadata-bump.yml - params: - ALL_USERS: false - - task: generate-dpkg-list-for-osl - file: docs-platform-automation-with-docs/ci/tasks/generate-dpkg-list-for-OSL.yml - image: binaries-image - - in_parallel: - - put: #@ "platform-automation-" + version.number - params: - repository: #@ "platform-automation-" + version.number - tag: version/version - tag_prefix: v - rebase: true - - put: image-receipt-s3 - params: - file: rc-image-receipt-s3/image-receipt-* - acl: public-read - - put: #@ "pivnet-release-" + version.number - params: - file_glob: packaged-product/* - metadata_file: metadata/metadata.yml - s3_filepath_prefix: ((pivnet_s3_filepath_prefix)) - 
- put: #@ "version-" + version.number - params: - file: version/version - - put: slack - params: - silent: true - text: #@ "A new patch version was released to Tanzunet: " + version.number -#@ end -- name: empty-cve-patch-notes-file - plan: - - in_parallel: - - get: rc-image-s3 - passed: - #@ for version in data.values.versions: - - #@ "update-" + version.number - #@ end - trigger: true - - get: docs-platform-automation-with-docs - - task: empty-cve-patch-notes - file: docs-platform-automation-with-docs/ci/tasks/pivnet-release/empty-cve-patch-notes.yml - input_mapping: - docs-platform-automation: docs-platform-automation-with-docs - - put: docs-platform-automation-with-docs - params: - repository: docs-platform-automation - rebase: true -#@ for bump in ["patch", "minor", "major"]: -- name: #@ "bump-om-" + bump - plan: - - in_parallel: - - get: om - passed: - #@ for/end deployment in deployments_without_gcp: - - #@ "run-tasks-in-" + deployment.env_name + "-job" - - upgrade-opsman-gcp - - additional-task-testing - - get: om-version - params: - bump: #@ bump - - put: om - params: - repository: om - only_tag: true - tag: om-version/version - - task: build - file: om/ci/tasks/build/task.yml - params: - GITHUB_TOKEN: ((om.access_token)) - - put: om-version - params: - bump: #@ bump -#@ end -- name: build-concourse-buildinfo-image - plan: - - in_parallel: - - get: docs-platform-automation - - task: build-image - privileged: true - config: - platform: linux - image_resource: - type: registry-image - source: - repository: concourse/oci-build-task - inputs: - - name: docs-platform-automation - outputs: - - name: image - params: - CONTEXT: docs-platform-automation/ci/dockerfiles - DOCKERFILE: docs-platform-automation/ci/dockerfiles/Dockerfile.build-info-resource - run: - path: build - - put: build-info-image - params: - image: image/image.tar -- name: send-srp-report - plan: - - in_parallel: - - get: srp-helper - - get: docs-platform-automation - - get: 
docs-platform-automation-v5.0 - - get: version-v5.0 - - put: concourse-build-info - params: - metadata: - key3: value3 - - - task: srp-collect-and-submit - file: docs-platform-automation/ci/tasks/srp-collect-and-submit/task.yml - params: - CLIENT_ID: ((srp-mds-client-id)) - CLIENT_SECRET: ((srp-mds-client-secrets)) - DOMAIN: platform-automation diff --git a/ci/config/runtime-config.yml b/ci/config/runtime-config.yml deleted file mode 100644 index 456f9b45..00000000 --- a/ci/config/runtime-config.yml +++ /dev/null @@ -1,14 +0,0 @@ -releases: - - name: os-conf - version: 22.0.0 - -addons: - - name: os-configuration - jobs: - - name: login_banner - release: os-conf - properties: - login_banner: - text: | - Authorized Use Only. - Unauthorized use will be prosecuted to the fullest extent of the law. diff --git a/ci/dockerfiles/Dockerfile.binaries b/ci/dockerfiles/Dockerfile.binaries deleted file mode 100644 index 80f43d88..00000000 --- a/ci/dockerfiles/Dockerfile.binaries +++ /dev/null @@ -1,24 +0,0 @@ -ARG base_image=internalpcfplatformautomation/platform-automation:packages -FROM ${base_image} - -# copy files directly from the inputs/resource of concourse -COPY bbr-cli/*linux* /usr/bin/bbr -COPY bosh-cli/bosh-cli-* /usr/bin/bosh -COPY isolation-segment-replicator/replicator-linux* /usr/bin/iso-replicator -COPY om-cli/om /usr/bin/om -COPY winfs-injector/winfs-injector-linux* /usr/bin/winfs-injector -COPY govc-cli/*Linux* /tmp -COPY credhub-cli/*linux* /tmp - -RUN tar xvf /tmp/govc* && mv govc /usr/bin/govc -RUN tar xvf /tmp/credhub* && mv credhub /usr/bin/credhub - -# Make copied binaries executable -RUN chmod +x \ - /usr/bin/bbr \ - /usr/bin/bosh \ - /usr/bin/credhub \ - /usr/bin/govc \ - /usr/bin/iso-replicator \ - /usr/bin/om \ - /usr/bin/winfs-injector diff --git a/ci/dockerfiles/Dockerfile.build-info-resource b/ci/dockerfiles/Dockerfile.build-info-resource deleted file mode 100644 index 92be6a49..00000000 --- a/ci/dockerfiles/Dockerfile.build-info-resource +++ 
/dev/null @@ -1,6 +0,0 @@ -FROM concourse/buildroot:base - -ADD build-info-resource-files/check /opt/resource/ -ADD build-info-resource-files/in /opt/resource/ -ADD build-info-resource-files/out /opt/resource/ -RUN chmod +x /opt/resource/* diff --git a/ci/dockerfiles/Dockerfile.ci b/ci/dockerfiles/Dockerfile.ci deleted file mode 100644 index 2cbd1dab..00000000 --- a/ci/dockerfiles/Dockerfile.ci +++ /dev/null @@ -1,91 +0,0 @@ -FROM golang:latest - -RUN apt-get -y update && apt-get -y install rsync build-essential bash zip unzip curl gettext jq python3-pip python3-dev git wget tar xz-utils -RUN pip3 install --upgrade pip - -RUN wget "https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz" && \ - tar --xz -xvf shellcheck-stable.linux.x86_64.tar.xz && \ - cp shellcheck-stable/shellcheck /usr/bin/ - -RUN wget "https://dl.minio.io/server/minio/release/linux-amd64/minio" && \ - chmod +x minio && \ - mv minio /usr/bin/minio - -RUN wget "https://dl.minio.io/client/mc/release/linux-amd64/mc" && \ - chmod +x mc && \ - mv mc /usr/bin/mc - -RUN wget https://releases.hashicorp.com/terraform/1.0.11/terraform_1.0.11_linux_amd64.zip && \ - unzip terraform_1.0.11_linux_amd64.zip -d /usr/bin - -# install BOSH -RUN wget "https://github.com/cloudfoundry/bosh-cli/releases/download/v6.2.1/bosh-cli-6.2.1-linux-amd64" -O bosh -RUN chmod +x ./bosh && \ - mv ./bosh /usr/bin/bosh - -# install credhub -RUN wget "https://github.com/cloudfoundry-incubator/credhub-cli/releases/download/2.6.2/credhub-linux-2.6.2.tgz" -O credhub.tgz -RUN tar xzf credhub.tgz -RUN chmod +x ./credhub && \ - mv ./credhub /usr/bin/credhub - -# install fly -RUN wget "https://platform-automation.ci.cf-app.com/api/v1/cli?arch=amd64&platform=linux" -O fly -RUN chmod +x ./fly && \ - mv ./fly /usr/bin/fly - -# install https://github.com/sclevine/yj -RUN wget "https://github.com/sclevine/yj/releases/download/v4.0.0/yj-linux" 
-O yj -RUN chmod +x ./yj && \ - mv ./yj /usr/bin/yj - -# install om -RUN git clone https://github.com/pivotal-cf/om -RUN cd om && \ - go build . && \ - mv om /usr/bin/ && \ - cd - - -# rspec -RUN apt-get -y install ruby ruby-dev && \ - echo "gem: --no-document" >> /etc/gemrc && \ - gem install rspec english bundler - -# uaac -RUN gem install cf-uaac - -# used by `delete-terraformed-ops-manager` -RUN pip3 install awscli - -# govc -#RUN go install github.com/vmware/govmomi/govc@latest - -# govc binary -RUN wget https://github.com/vmware/govmomi/releases/latest/download/govc_Linux_x86_64.tar.gz && \ - tar xvfz govc_Linux_x86_64.tar.gz -C /usr/local/bin govc - -# openstack -RUN pip3 install python-openstackclient - -# gcloud -RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk-bionic main" | tee /etc/apt/sources.list.d/google-cloud-sdk.list -RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -RUN apt-get -y update && apt-get -y install --no-install-recommends google-cloud-sdk - -# azure -RUN echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ bionic main" | tee /etc/apt/sources.list.d/azure-cli.list -RUN curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add - -RUN apt-get install apt-transport-https && apt-get update && apt-get install azure-cli - - -RUN git config --global user.name "platform-automation" -RUN git config --global user.email "platformautomation@groups.vmware.com" - -# used by test -RUN go install github.com/onsi/ginkgo/ginkgo@latest - -# use to cleanup IAASes -RUN wget -O /usr/bin/leftovers "https://github.com/genevieve/leftovers/releases/download/v0.59.0/leftovers-v0.59.0-linux-amd64" -RUN chmod +x /usr/bin/leftovers - -ENV CGO_ENABLED=0 diff --git a/ci/dockerfiles/Dockerfile.om b/ci/dockerfiles/Dockerfile.om deleted file mode 100644 index 4ebd9319..00000000 --- a/ci/dockerfiles/Dockerfile.om +++ /dev/null @@ -1,7 +0,0 @@ 
-FROM golang:1.15 - -RUN git clone https://github.com/pivotal-cf/om -RUN cd om && go build -o /usr/bin/om main.go -RUN apt-get update && \ - apt-get -y install gettext-base && \ - apt-get clean diff --git a/ci/dockerfiles/Dockerfile.packages b/ci/dockerfiles/Dockerfile.packages deleted file mode 100644 index 5455fb5c..00000000 --- a/ci/dockerfiles/Dockerfile.packages +++ /dev/null @@ -1,48 +0,0 @@ -FROM ubuntu:bionic - -# Install necessary binary -RUN apt-get update && apt-get -y --no-install-recommends install \ - bash \ - build-essential \ - ca-certificates \ - curl \ - gettext \ - git \ - netcat-openbsd \ - python3-dev \ - python3-setuptools \ - rsync \ - ssh \ - unzip \ - zip \ - && true -# netcat for `bosh ssh` -- the why is explained here: https://github.com/cloudfoundry/bosh-cli/issues/374 - -# pip -# Install via source as upgrading the pip installed from Ubuntu leaves unwanted artifacts. -# ERROR: This script does not work on Python 3.6 The minimum supported Python version is 3.7. -# Please use https://bootstrap.pypa.io/pip/3.6/get-pip.py instead. 
-RUN curl https://bootstrap.pypa.io/pip/3.6/get-pip.py -o get-pip.py && \ - python3 get-pip.py - -# gcloud -RUN curl https://sdk.cloud.google.com > install.sh && bash install.sh --disable-prompts -ENV PATH /root/google-cloud-sdk/bin:$PATH -RUN ln -s /root/google-cloud-sdk/bin/* /usr/local/bin/ -RUN gcloud --version - -# azure -RUN pip3 install azure-cli --use-feature=2020-resolver -RUN az --version - -# aws -RUN pip3 install awscli --use-feature=2020-resolver -RUN aws --version - -# openstack -RUN pip3 install python-openstackclient --use-feature=2020-resolver -RUN openstack --version - -# Upgrade all packages -RUN apt-get upgrade -y -RUN apt-get autoremove -y build-essential python3-dev diff --git a/ci/dockerfiles/Dockerfile.python-mitigation b/ci/dockerfiles/Dockerfile.python-mitigation deleted file mode 100644 index 601ae33f..00000000 --- a/ci/dockerfiles/Dockerfile.python-mitigation +++ /dev/null @@ -1,12 +0,0 @@ -FROM internalpcfplatformautomation/platform-automation:packages - -# remove azure cli -RUN pip3 install pip-autoremove -RUN pip-autoremove -y azure-cli - -# remove pip -RUN pip3 uninstall -y pip-autoremove pip - -# remove gcloud CLI -RUN rm -Rf /usr/local/bin/gcloud -RUN rm -Rf /root/google-cloud-sdk diff --git a/ci/dockerfiles/Dockerfile.vsphere-only b/ci/dockerfiles/Dockerfile.vsphere-only deleted file mode 100644 index a1bb92c6..00000000 --- a/ci/dockerfiles/Dockerfile.vsphere-only +++ /dev/null @@ -1,19 +0,0 @@ -ARG base_image=internalpcfplatformautomation/platform-automation:testing -FROM ${base_image} - -# remove azure cli -RUN pip3 install pip-autoremove -RUN pip-autoremove -y azure-cli - -# remove AWS CLI -RUN pip-autoremove -y awscli - -# remove Openstack CLI -RUN pip-autoremove -y python-openstackclient - -# remove pip -RUN pip3 uninstall -y pip-autoremove pip - -# remove gcloud CLI -RUN rm -Rf /usr/local/bin/gcloud -RUN rm -Rf /root/google-cloud-sdk diff --git a/ci/dockerfiles/build-info-resource-files/check 
b/ci/dockerfiles/build-info-resource-files/check deleted file mode 100644 index 0c341e33..00000000 --- a/ci/dockerfiles/build-info-resource-files/check +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo "[]" diff --git a/ci/dockerfiles/build-info-resource-files/in b/ci/dockerfiles/build-info-resource-files/in deleted file mode 100644 index a5df013e..00000000 --- a/ci/dockerfiles/build-info-resource-files/in +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -cd $1 - -echo "$BUILD_ID" > build-id -echo "$BUILD_NAME" > build-name -echo "$BUILD_JOB_NAME" > build-job-name -echo "$BUILD_PIPELINE_NAME" > build-pipeline-name -echo "$ATC_EXTERNAL_URL" > atc-external-url -echo "{\"version\":{\"build\":\"$BUILD_ID\"}}" diff --git a/ci/dockerfiles/build-info-resource-files/out b/ci/dockerfiles/build-info-resource-files/out deleted file mode 100644 index 795b099d..00000000 --- a/ci/dockerfiles/build-info-resource-files/out +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo "{\"version\":{\"build\":\"$BUILD_ID\"}}" diff --git a/ci/docs/pipeline.yml b/ci/docs/pipeline.yml deleted file mode 100644 index 9b22cde6..00000000 --- a/ci/docs/pipeline.yml +++ /dev/null @@ -1,235 +0,0 @@ -#@ load("@ytt:data", "data") ---- -#@ excluded_versions = [] -#@ for version in data.values.versions: -#@ if getattr(version, "exclude_from_dropdown", False): -#@ excluded_versions.append(version.branch) -#@ end -#@ end -#@ excluded_versions = ",".join(excluded_versions) -resources: -#@ for version in data.values.versions: -#@ branch = version.branch -#@ tag_filter = getattr(version, "tag_filter", False) -- name: #@ "docs-platform-automation-" + branch - type: git - source: - branch: #@ branch - uri: git@github.com:pivotal/docs-platform-automation - private_key: ((platform_automation_docs.private_key)) -- name: #@ "platform-automation-" + branch - type: git - source: - branch: #@ branch - private_key: ((platform_automation.private_key)) - uri: git@github.com:pivotal/platform-automation 
-#@ if tag_filter: - tag_filter: #@ tag_filter -#@ end -#@ end -- name: docs-platform-automation - type: git - source: - uri: https://github.com/pivotal/docs-platform-automation -- name: docs-platform-automation-reference-pipeline-config - type: git - source: - branch: develop - private_key: ((platform_automation_docs_reference.private_key)) - uri: git@github.com:pivotal/docs-platform-automation-reference-pipeline-config -- name: docs-app-staging - type: cf - source: - api: ((pcf1.api-endpoint)) - username: ((pcf1.svc-account-name)) - password: ((pcf1.svc-account-password)) - organization: ((pcf1.docs-org)) - space: ((pcf1.docs-staging-space)) -- name: docs-app-production - type: cf - source: - api: ((pcf1.api-endpoint)) - username: ((pcf1.svc-account-name)) - password: ((pcf1.svc-account-password)) - organization: ((pcf1.docs-org)) - space: ((pcf1.docs-prod-space)) -- name: mkdocs-pivotal-theme - type: git - source: - uri: https://github.com/pivotal/mkdocs-pivotal-theme -- name: image - type: registry-image - source: - password: ((harbor.password)) - repository: ((harbor.docs-repository)) - username: ((harbor.username)) - tag: latest - -jobs: -- name: build-and-push-image - serial: true - plan: - - get: mkdocs-pivotal-theme - trigger: true - - task: build - privileged: true - config: - platform: linux - image_resource: - type: registry-image - source: - repository: harbor-repo.vmware.com/dockerhub-proxy-cache/vito/oci-build-task - params: - DOCKERFILE: mkdocs-pivotal-theme/ci/Dockerfile - BUILD_ARG_IMAGE: harbor-repo.vmware.com/dockerhub-proxy-cache/library/ubuntu:focal - inputs: - - name: mkdocs-pivotal-theme - outputs: - - name: image - run: - path: build - - put: image - params: {image: image/image.tar} - -- name: deploy-to-staging - serial: true - plan: - - in_parallel: - - get: mkdocs-pivotal-theme -#@ for version in data.values.versions: - - get: #@ "platform-automation-" + version.branch - trigger: true - - get: #@ 
"docs-platform-automation-" + version.branch - trigger: true -#@ end - - get: docs-platform-automation-reference-pipeline-config - - get: docs-platform-automation - - in_parallel: -#@ for version in data.values.versions: -#@ if not getattr(version, "supported", False): -#@ continue -#@ end - - task: #@ "run-test-for-tasks-in-docs-platform-automation-" + version.branch - file: docs-platform-automation/ci/tasks/docs-task-test.yml - input_mapping: - docs-platform-automation: #@ "docs-platform-automation-" + version.branch -#@ end - - task: generate-docs-app - config: - platform: linux - inputs: - - name: mkdocs-pivotal-theme -#@ for version in data.values.versions: - #@ if not getattr(version, "supported", False): - - name: #@ "platform-automation-" + version.branch - #@ end - - name: #@ "docs-platform-automation-" + version.branch -#@ end - - name: docs-platform-automation-reference-pipeline-config - outputs: - - name: docs - image_resource: - type: registry-image - source: - repository: ((harbor.docs-repository)) - run: - path: mkdocs-pivotal-theme/ci/build-docs/build_docs.rb - args: - - '--output-dir' - - './docs' - - '--docs-dir' - - '.' 
- - '--docs-prefix' - - 'docs-platform-automation' - - '--site-prefix' - - 'platform-automation' - - '--domains' - - 'docs-pcf-staging.tas.vmware.com docs-pcf-staging.sc2-04-pcf1-apps.oc.vmware.com' - - '--ignore-directories' - - 'docs-platform-automation-reference-pipeline-config' - - '--exclude-from-dropdown' - - #@ excluded_versions - - task: set-redirects - file: docs-platform-automation/ci/tasks/additional-nginx-rewrite-rules.yml - - put: docs-app-staging - params: - manifest: docs/manifest.yml - path: docs - current_app_name: docs-platform-automation - show_app_log: true - - task: prepare-for-lint - config: - platform: linux - image_resource: - type: registry-image - source: - repository: ((harbor.docs-repository)) - run: - path: sleep - args: ['1m'] - - - task: link-linter - file: mkdocs-pivotal-theme/ci/linter/task.yml - input_mapping: - site: docs - vars: - docs_repository: ((harbor.docs-repository)) - site_url: https://docs-pcf-staging.tas.vmware.com/platform-automation/ - allow_list: "github.com.*edit|v3.0/pipeline-design/configuration-management-strategies.html|https://fonts.gstatic.com|https://concourse-ci.org/task-step.html#input_mapping|https://concourse-ci.org/credhub-credential-manager.html#credential-lookup-rules|https://github.com/concourse/s3-resource#source-configuration|#L\\d+" - -- name: deploy-to-production - serial: true - plan: - - in_parallel: - - get: image -#@ for version in data.values.versions: - - get: #@ "platform-automation-" + version.branch - - get: #@ "docs-platform-automation-" + version.branch - trigger: true -#@ end - - get: mkdocs-pivotal-theme - - get: docs-platform-automation-reference-pipeline-config - - get: docs-platform-automation - - task: generate-docs-app - config: - platform: linux - inputs: - - name: mkdocs-pivotal-theme -#@ for version in data.values.versions: - #@ if not getattr(version, "supported", False): - - name: #@ "platform-automation-" + version.branch - #@ end - - name: #@ 
"docs-platform-automation-" + version.branch -#@ end - - name: docs-platform-automation-reference-pipeline-config - outputs: - - name: docs - image_resource: - type: registry-image - source: - repository: ((harbor.docs-repository)) - run: - path: mkdocs-pivotal-theme/ci/build-docs/build_docs.rb - args: - - '--output-dir' - - './docs' - - '--docs-dir' - - '.' - - '--docs-prefix' - - 'docs-platform-automation' - - '--site-prefix' - - 'platform-automation' - - '--domains' - - 'docs.pivotal.io docs-test.pivotal.io' - - '--ignore-directories' - - 'docs-platform-automation-reference-pipeline-config' - - '--exclude-from-dropdown' - - #@ excluded_versions - - task: set-redirects - file: docs-platform-automation/ci/tasks/additional-nginx-rewrite-rules.yml - - put: docs-app-production - params: - manifest: docs/manifest.yml - path: docs - current_app_name: docs-platform-automation - show_app_log: true diff --git a/ci/docs/versions.yml b/ci/docs/versions.yml deleted file mode 100644 index 6b707bc3..00000000 --- a/ci/docs/versions.yml +++ /dev/null @@ -1,36 +0,0 @@ -#@data/values ---- -versions: - - branch: develop - supported: true - - branch: v4.4 - tag_filter: v4.4* - supported: true - - branch: v5.0 - tag_filter: v5.0* - supported: true - - branch: v1.0 - tag_filter: v1.0* - exclude_from_dropdown: true - - branch: v1.1 - tag_filter: v1.1* - exclude_from_dropdown: true - - branch: v2.0 - tag_filter: v2.0* - exclude_from_dropdown: true - - branch: v2.1 - tag_filter: v2.1* - exclude_from_dropdown: true - - branch: v2.2 - tag_filter: v2.2* - exclude_from_dropdown: true - - branch: v3.0 - tag_filter: v3.0* - - branch: v4.0 - tag_filter: v4.0* - - branch: v4.1 - tag_filter: v4.1* - - branch: v4.2 - tag_filter: v4.2* - - branch: v4.3 - tag_filter: v4.3* diff --git a/ci/go.sum b/ci/go.sum deleted file mode 100644 index d3c6ca13..00000000 --- a/ci/go.sum +++ /dev/null @@ -1,63 +0,0 @@ -github.com/blang/semver v3.5.1+incompatible 
h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= -github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/ci/opsman-support/pipeline.yml b/ci/opsman-support/pipeline.yml deleted file mode 100644 index 5933e404..00000000 --- a/ci/opsman-support/pipeline.yml +++ /dev/null @@ -1,289 +0,0 @@ ---- -#@ opsman_versions = [ "2.9", "2.10"] -#@ pat_versions = ["4.4", "5.0"] -resource_types: -- name: pivnet - type: registry-image - source: - repository: pivotalcf/pivnet-resource - tag: latest-final -resources: - - name: weekly - type: time - source: - interval: 168h - location: America/Denver - start: 4:00 AM - stop: 11:00 AM - -#@ for pat_version in pat_versions: - - name: #@ "platform-automation-tasks-" + pat_version - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: platform-automation - product_version: #@ "^" + pat_version.replace('.', '\.') + "\.\d+$" - - - name: #@ "platform-automation-image-" + pat_version - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: platform-automation - product_version: #@ "^" + pat_version.replace('.', '\.') + "\.\d+$" -#@ end - - name: paving - type: git - source: - uri: https://github.com/pivotal/paving - - - name: docs-platform-automation - type: git - source: - uri: https://github.com/pivotal/docs-platform-automation - - - name: deployments - type: git - source: - branch: main - private_key: ((platform_automation_deployments.private_key)) - uri: git@github.com:pivotal/platform-automation-deployments - -#@ for opsman_version in opsman_versions: - - name: #@ "opsman-image-" + opsman_version - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: ops-manager - product_version: #@ "^" + 
opsman_version.replace('.', '\.') + "\.\d+$" - - - name: #@ "example-product-" + opsman_version - type: s3 - source: - bucket: example-product-pivotal-files - private: false - regexp: #@ "^example-product-(" + opsman_version.replace('.', '\.') + ".*)\.pivotal$" -#@ end - - name: stemcells-ubuntu-xenial - type: pivnet - source: - api_token: ((pivnet_token)) - product_slug: stemcells-ubuntu-xenial - product_version: ^97\..* - -jobs: -#@ for pat_version in pat_versions: -#@ for opsman_version in opsman_versions: -- name: #@ "test-opsman-" + opsman_version + "-with-pat-version-" + pat_version - serial: true - serial_groups: ["install"] - plan: - - in_parallel: - - get: stemcells-ubuntu-xenial - params: - globs: - - '*google*.tgz' - - get: #@ "example-product-" + opsman_version - - get: #@ "opsman-image-" + opsman_version - params: - globs: - - '*gcp*.yml' - - '*GCP.yml' - - get: weekly - trigger: true - - get: paving - - get: platform-automation-tasks - resource: #@ "platform-automation-tasks-" + pat_version - params: - unpack: true - globs: - - "*tasks*.zip" - - get: platform-automation-image - resource: #@ "platform-automation-image-" + pat_version - params: - unpack: true - globs: - - "*image*.tgz" - - get: deployments - - get: docs-platform-automation - - task: run-terraform - attempts: 2 - file: docs-platform-automation/ci/tasks/create-infrastructure/task.yml - params: - IAAS: gcp - DEPLOYMENT_NAME: ci-support - OM_PASSWORD: ((opsman-login.password)) - OM_USERNAME: ((opsman-login.username)) - PLATFORM_AUTOMATION_EMAIL: ((platform-automation-email)) - ensure: - put: deployments - params: - rebase: true - repository: deployments - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - config: paving - tasks: platform-automation-tasks - vars: deployments - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: 
config/ci/configuration/gcp/director.yml config/ci/configuration/gcp/ops-manager.yml config/ci/configuration/auth.yml - VARS_PATHS: vars/ci-support/terraform-vars.yml - - task: create-vm - file: platform-automation-tasks/tasks/create-vm.yml - input_mapping: - image: #@ "opsman-image-" + opsman_version - state: deployments - config: paving - vars: deployments - params: - STATE_FILE: ci-support/state.yml - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-support/terraform-vars.yml - image: platform-automation-image - ensure: - do: - - task: state-file - file: platform-automation-tasks/tasks/make-git-commit.yml - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - COMMIT_MESSAGE: #@ "create-vm update state file for support at " + opsman_version - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: ci-support/state.yml - input_mapping: - repository: deployments - file-source: generated-state - image: platform-automation-image - - put: deployments - params: - rebase: true - repository: repository-commit - - task: configure-authentication - file: platform-automation-tasks/tasks/configure-authentication.yml - image: platform-automation-image - attempts: 20 - input_mapping: - env: deployments - config: paving - params: - ENV_FILE: ci-support/env.yml - AUTH_CONFIG_FILE: ci/configuration/auth.yml - VARS_FILES: env/ci-support/terraform-vars.yml - - task: configure-director - file: platform-automation-tasks/tasks/configure-director.yml - image: platform-automation-image - input_mapping: - env: deployments - config: paving - params: - ENV_FILE: ci-support/env.yml - VARS_FILES: env/ci-support/terraform-vars.yml - DIRECTOR_CONFIG_FILE: ci/configuration/gcp/director.yml - - task: apply-director-changes - file: platform-automation-tasks/tasks/apply-director-changes.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-support/env.yml - - task: 
staged-director-config - file: platform-automation-tasks/tasks/staged-director-config.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-support/env.yml - - task: export-installation - file: platform-automation-tasks/tasks/export-installation.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-support/env.yml - - task: upload-product - image: platform-automation-image - file: platform-automation-tasks/tasks/upload-product.yml - input_mapping: - env: deployments - product: #@ "example-product-" + opsman_version - params: - ENV_FILE: ci-support/env.yml - - task: stage-configure-apply - image: platform-automation-image - file: platform-automation-tasks/tasks/stage-configure-apply.yml - input_mapping: - product: #@ "example-product-" + opsman_version - env: deployments - config: deployments - params: - ENV_FILE: ci-support/env.yml - CONFIG_FILE: ci-support/example-product.yml - VARS_FILES: env/ci-support/terraform-vars.yml - ALLOW_PENDING_CHANGES: false - - task: delete-installation - file: platform-automation-tasks/tasks/delete-installation.yml - image: platform-automation-image - input_mapping: - env: deployments - params: - ENV_FILE: ci-support/env.yml - - task: delete-gcp-opsman-vm - file: platform-automation-tasks/tasks/delete-vm.yml - input_mapping: - state: generated-state - config: paving - vars: deployments - params: - OPSMAN_CONFIG_FILE: ci/configuration/gcp/ops-manager.yml - VARS_FILES: vars/ci-support/terraform-vars.yml - image: platform-automation-image - ensure: - do: - - task: state-file - file: platform-automation-tasks/tasks/make-git-commit.yml - params: - GIT_AUTHOR_NAME: platform-automation-bot - GIT_AUTHOR_EMAIL: ((platform-automation-email)) - COMMIT_MESSAGE: #@ "delete-vm update state file for support at " + opsman_version - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: ci-support/state.yml - input_mapping: - repository: deployments - 
file-source: generated-state - image: platform-automation-image - - put: deployments - params: - rebase: true - repository: repository-commit - - task: delete-infrastructure - file: docs-platform-automation/ci/tasks/delete-infrastructure/task.yml - params: - IAAS: gcp - DEPLOYMENT_NAME: ci-support - PLATFORM_AUTOMATION_EMAIL: ((platform-automation-email)) - ensure: - put: deployments - params: - rebase: true - repository: deployments - ensure: - task: leftovers - file: docs-platform-automation/ci/tasks/leftovers.yml - params: - BBL_IAAS: gcp - BBL_AWS_ACCESS_KEY_ID: ((s3.access_key_id)) - BBL_AWS_SECRET_ACCESS_KEY: ((s3.secret_access_key)) - BBL_AWS_REGION: ((s3.region_name)) - BBL_AZURE_CLIENT_ID: ((azure.client_id)) - BBL_AZURE_CLIENT_SECRET: ((azure.client_secret)) - BBL_AZURE_TENANT_ID: ((azure.tenant_id)) - BBL_AZURE_SUBSCRIPTION_ID: ((azure.subscription_id)) - BBL_GCP_SERVICE_ACCOUNT_KEY: ((gcp.service_account)) - FILTER: ci-support - DRY_RUN: false - NO_CONFIRM: true -#@ end -#@ end diff --git a/ci/patch-notes/4.4-patch-notes.md b/ci/patch-notes/4.4-patch-notes.md deleted file mode 100644 index e69de29b..00000000 diff --git a/ci/patch-notes/5.0-patch-notes.md b/ci/patch-notes/5.0-patch-notes.md deleted file mode 100644 index e69de29b..00000000 diff --git a/ci/patch-notes/cve-patch-notes.md b/ci/patch-notes/cve-patch-notes.md deleted file mode 100644 index e69de29b..00000000 diff --git a/ci/python-mitigation-support/pipeline.yml b/ci/python-mitigation-support/pipeline.yml deleted file mode 100644 index 8dd0b707..00000000 --- a/ci/python-mitigation-support/pipeline.yml +++ /dev/null @@ -1,157 +0,0 @@ -#@ load("@ytt:data", "data") ---- -resource_types: -- name: pivnet - type: registry-image - source: - repository: pivotalcf/pivnet-resource - tag: latest-final -resources: -- name: docs-platform-automation - type: git - source: - uri: https://github.com/pivotal/docs-platform-automation -#@ for version in data.values.versions: -- name: #@ 
"docs-platform-automation-" + version.branch_name - type: git - source: - branch: #@ version.branch_name - private_key: ((platform_automation_docs.private_key)) - uri: git@github.com:pivotal/docs-platform-automation -- name: #@ "platform-automation-" + version.branch_name - type: git - source: - branch: #@ version.branch_name - private_key: ((platform_automation.private_key)) - uri: git@github.com:pivotal/docs-platform-automation -- name: #@ "pivnet-release-" + version.branch_name - type: pivnet - source: - access_key_id: ((pivnet_aws_access_key)) - api_token: ((pivnet_token)) - product_slug: platform-automation - product_version: #@ "^" + version.product_version_regex + "$" - secret_access_key: ((pivnet_aws_secret_key)) -- name: #@ "pivnet-release-" + version.branch_name + "-python-mitigation" - type: pivnet - source: - access_key_id: ((pivnet_aws_access_key)) - api_token: ((pivnet_token)) - product_slug: platform-automation - product_version: #@ version.product_version_regex - secret_access_key: ((pivnet_aws_secret_key)) -#@ end -- name: osl - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: open_source_license_Platform_Automation_Toolkit_for_VMware_Tanzu_(.*)_GA.txt - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: odp - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: VMware-Tanzu-platform-automation-toolkit-(.*)-ODP.tar.gz - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: image-receipt-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: image-receipt-(.*) - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-image-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: 
platform-automation-image-(.*).tgz - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -- name: rc-tasks-s3 - type: s3 - source: - access_key_id: ((s3.access_key_id)) - bucket: ((s3.buckets.release_candidate)) - regexp: platform-automation-tasks-(.*).zip - region_name: ((s3.region_name)) - secret_access_key: ((s3.secret_access_key)) -jobs: -#@ for version in data.values.versions: -- name: #@ "update-" + version.branch_name - plan: - - in_parallel: - - get: #@ "platform-automation-" + version.branch_name - - get: #@ "docs-platform-automation-" + version.branch_name - - get: docs-platform-automation - - get: platform-automation-image - resource: #@ "pivnet-release-" + version.branch_name - trigger: true - params: { globs: ["*.tgz"] } - - get: osl - - get: odp - - task: generate-version - config: - platform: linux - image_resource: - type: docker-image - source: - repository: ((docker.ci-repository)) - tag: testing - inputs: - - name: platform-automation-image - outputs: - - name: version - run: - path: bash - args: - - -c - - | - set -eux - version="$(bosh int platform-automation-image/metadata.yaml --path /release/version)" - echo $version - echo "$version+python-mitigation" > version/version - - task: remove-python - privileged: true - file: docs-platform-automation/ci/tasks/python-mitigation.yml - - task: create-release-file - file: docs-platform-automation/ci/tasks/package-for-release/platform-automation.yml - input_mapping: - platform-automation-image: bumped-platform-automation-image - platform-automation: #@ "platform-automation-" + version.branch_name - - in_parallel: - - task: test-docker-import - privileged: true - file: docs-platform-automation/ci/tasks/test-docker-import/task.yml - - task: test-task - file: #@ "platform-automation-" + version.branch_name + "/tasks/test.yml" - image: platform-automation-image - input_mapping: - platform-automation-tasks: #@ "platform-automation-" + version.branch_name - - task: 
generate-platform-automation-metadata - file: docs-platform-automation/ci/tasks/pivnet-release/generate-platform-automation-metadata-bump.yml - - put: #@ "pivnet-release-" + version.branch_name + "-python-mitigation" - params: - file_glob: packaged-product/* - metadata_file: metadata/metadata.yml - s3_filepath_prefix: ((pivnet_s3_filepath_prefix)) - - put: image-receipt-s3 - params: - file: image-receipt/image-receipt-* - acl: public-read - - put: rc-image-s3 - params: - file: packaged-product/platform-automation-image-*.tgz - get_params: - skip_download: "true" - - put: rc-tasks-s3 - params: - file: packaged-product/platform-automation-tasks-*.zip - get_params: - skip_download: "true" -#@ end diff --git a/ci/python-mitigation-support/versions.yml b/ci/python-mitigation-support/versions.yml deleted file mode 100644 index 0d970cf6..00000000 --- a/ci/python-mitigation-support/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -#@data/values ---- -versions: - - branch_name: v4.4 - product_version_regex: 4\.4\.\d+ diff --git a/ci/scripts/generate-release-notes/generate-release-notes.go b/ci/scripts/generate-release-notes/generate-release-notes.go deleted file mode 100644 index d3ded549..00000000 --- a/ci/scripts/generate-release-notes/generate-release-notes.go +++ /dev/null @@ -1,332 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "fmt" - "github.com/blang/semver" - "github.com/jessevdk/go-flags" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "sort" - "strings" - "time" -) - -type command struct { - DocsRepoDir string `long:"docs-dir" required:"true" description:"the path to the docs git repo"` - PatchNotesPath []string `long:"patch-notes-path" description:"the path to the patch release notes. 
Will append all notes provided, in order."` - PatchVersions []string `long:"patch-versions" description:"a list patch versions to be released with their notes being --patch-notes-path"` -} - -func main() { - cmd := &command{} - _, err := flags.Parse(cmd) - if err != nil { - log.Fatalf("could not create release notes: %s\n", err) - } - - err = cmd.execute() - if err != nil { - log.Fatalf("could not create release notes: %s\n", err) - } -} - -var ( - documentRegex = regexp.MustCompile("(?ms)(.*?)\n(##[^#].*)") - sectionRegex = regexp.MustCompile(`^##[^#]*v(.*?)$`) -) - -type section struct { - version semver.Version - lines []string -} - -func (c *command) execute() error { - releaseNotes, err := getReleaseNotes(c.DocsRepoDir) - if err != nil { - return fmt.Errorf("could not open relase notes: %s", err) - } - - documentMatches := documentRegex.FindSubmatch(releaseNotes) - if len(documentMatches) == 0 { - return fmt.Errorf("could not find header, regex did not match") - } - - header := documentMatches[1] - versionsMatched := documentMatches[2] - sections := []section{} - minorVersions := map[string]bool{} - - reader := bufio.NewReader(bytes.NewBuffer(versionsMatched)) - for { - line, _, err := reader.ReadLine() - if err == io.EOF { - break - } - - if err != nil { - return fmt.Errorf("could not read lines from release notes: %s", err) - } - - matches := sectionRegex.FindSubmatch(line) - if len(matches) > 0 { - version := string(matches[1]) - - semverVersion, err := semver.Make(version) - if err != nil { - fmt.Printf("could not create semver for %s: %s", version, err) - continue - } - s := section{ - version: semverVersion, - lines: []string{string(line)}, - } - sections = append(sections, s) - - minorVersion := strings.Join(strings.Split(version, ".")[0:2], ".") - minorVersions[minorVersion] = true - } else { - sections = appendLineToLastSection(line, sections) - } - } - - if len(c.PatchNotesPath) > 0 { - for _, versionNumber := range c.PatchVersions { - version, err 
:= semver.Parse(versionNumber) - if err != nil { - return fmt.Errorf("could not parse semver: %s", err) - } - - for _, section := range sections { - if section.version.Equals(version) { - return fmt.Errorf("the version v%s already exists, cannot generate release notes", version) - } - } - - lines, err := c.createReleaseNoteLines(version) - if err != nil { - return err - } - - sections = append(sections, section{ - version: version, - lines: lines, - }) - } - } - - sort.Slice(sections, func(i, j int) bool { - return sections[j].version.LT(sections[i].version) - }) - - err, _ = runCommand( - c.DocsRepoDir, - "git", "checkout", "develop", - ) - if err != nil { - return fmt.Errorf("could not check develop: %s", err) - } - - err = checkoutBranchForReleaseNotes(c.DocsRepoDir, "develop") - if err != nil { - return fmt.Errorf("could not checkout branch: %s", err) - } - - err = generateReleaseNotes(c.DocsRepoDir, "10000.0", header, sections) - if err != nil { - return fmt.Errorf("could not generate release notes for %s: %s", "develop", err) - } - - err = pushReleaseNotes(c.DocsRepoDir, "develop") - if err != nil { - return fmt.Errorf("could not push release notes for %s: %s", "develop", err) - } - - for minorVersion, _ := range minorVersions { - err = checkoutBranchForReleaseNotes(c.DocsRepoDir, fmt.Sprintf("v%s", minorVersion)) - if err != nil { - return fmt.Errorf("could not checkout branch: %s", err) - } - - err = generateReleaseNotes(c.DocsRepoDir, minorVersion, header, sections) - if err != nil { - return fmt.Errorf("could not generate release notes for %s: %s", minorVersion, err) - } - - err = pushReleaseNotes(c.DocsRepoDir, fmt.Sprintf("v%s", minorVersion)) - if err != nil { - return fmt.Errorf("could not push release notes for %s: %s", minorVersion, err) - } - } - - err, _ = runCommand( - c.DocsRepoDir, - "git", "checkout", "develop", - ) - - return err -} - -func (c *command) createReleaseNoteLines(version semver.Version) ([]string, error) { - releaseTimeLine := 
time.Now().Format("January 2, 2006") - lines := []string{ - fmt.Sprintf( - "## v%s\n%s\n", - version.String(), - releaseTimeLine, - ), - } - for _, patchNotesPath := range c.PatchNotesPath { - rawContents, err := ioutil.ReadFile(patchNotesPath) - contents := strings.TrimSuffix(string(rawContents), "\n") - if err != nil { - return nil, fmt.Errorf("could not read patch notes path: %s, %w", patchNotesPath, err) - } - lines = append(lines, strings.Split(contents, "\n")...) - } - - lines = append(lines, "") - return lines, nil -} - -func appendLineToLastSection(line []byte, sections []section) []section { - s := sections[len(sections)-1] - s.lines = append(s.lines, string(line)) - sections[len(sections)-1] = s - return sections -} - -func runCommand(dir, cmd string, args ...string) (error, string) { - ourStdout := &bytes.Buffer{} - fmt.Printf("command: git %s\n", strings.Join(args, " ")) - - command := exec.Command(cmd, args...) - command.Dir = dir - command.Stderr = os.Stderr - command.Stdout = io.MultiWriter(os.Stdout, ourStdout) - return command.Run(), ourStdout.String() -} - -func getReleaseNotes(docsRepoDir string) ([]byte, error) { - docsRepoDir, err := filepath.Abs(docsRepoDir) - if err != nil { - return []byte{}, err - } - - if docsRepoDir == "" { - return []byte{}, fmt.Errorf("please provide a --dir") - } - - if info, err := os.Stat(docsRepoDir); err != nil || !info.IsDir() { - return []byte{}, fmt.Errorf("the provided '%s' is not a directory", docsRepoDir) - } - - releaseNotes, err := ioutil.ReadFile(filepath.Join(docsRepoDir, "docs", "release-notes.md")) - if err != nil { - return []byte{}, err - } - - return releaseNotes, nil -} - -func checkoutBranchForReleaseNotes(docsRepoDir, branchName string) error { - err, _ := runCommand( - docsRepoDir, - "git", "checkout", branchName, - ) - if err != nil { - return fmt.Errorf("could not checkout %s: %s", branchName, err) - } - - err, _ = runCommand( - docsRepoDir, - "git", "clean", "-ffd", "external", "||", "true", - 
) - if err != nil { - return fmt.Errorf("could not remove external folder: %s\n", err) - } - - err, _ = runCommand( - docsRepoDir, - "git", "pull", "origin", branchName, "-r", - ) - if err != nil { - return fmt.Errorf("could not pull commits for %s: %s", branchName, err) - } - - return nil -} - -func generateReleaseNotes(docsRepoDir, minorVersion string, header []byte, sections []section) error { - fmt.Printf("creating release notes for %s\n", minorVersion) - releaseNotesFile, err := os.Create(filepath.Join(docsRepoDir, "docs", "release-notes.md")) - if err != nil { - return fmt.Errorf("could not create file for %s: %s", minorVersion, err) - } - - semverMinorVersion, err := semver.Make(fmt.Sprintf("%s.999", minorVersion)) - if err != nil { - return fmt.Errorf("could not create semver for %s: %s", minorVersion, err) - } - - _, err = releaseNotesFile.Write(header) - if err != nil { - return fmt.Errorf("could write header: %s", err) - } - - _, _ = releaseNotesFile.WriteString("\n") - - for _, section := range sections { - compared := semverMinorVersion.Compare(section.version) - - if compared >= 0 { - for _, line := range section.lines { - _, _ = releaseNotesFile.WriteString(line) - _, _ = releaseNotesFile.WriteString("\n") - } - } - } - - err = releaseNotesFile.Close() - if err != nil { - return fmt.Errorf("could not close release note file: %s", err) - } - - return nil -} - -func pushReleaseNotes(docsRepoDir, minorVersion string) error { - err, ourStdout := runCommand( - docsRepoDir, - "git", "status", "--porcelain", - ) - if err != nil { - return fmt.Errorf("could not checkout cleanly: %s", err) - } - - if ourStdout != "" { - err, _ := runCommand( - docsRepoDir, - "git", "commit", "-am", fmt.Sprintf("generated release notes %s", minorVersion), - ) - if err != nil { - return fmt.Errorf("could not commit %s: %s", minorVersion, err) - } - - err, _ = runCommand( - docsRepoDir, - "git", "push", "-u", "origin", minorVersion, - ) - - if err != nil { - return 
fmt.Errorf("could not push commit %s: %s", minorVersion, err) - } - } - return nil -} diff --git a/ci/scripts/generate-release-notes/generate_release_notes_test.go b/ci/scripts/generate-release-notes/generate_release_notes_test.go deleted file mode 100644 index 436e6907..00000000 --- a/ci/scripts/generate-release-notes/generate_release_notes_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package main_test - -import ( - "fmt" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/onsi/gomega/gbytes" - "github.com/onsi/gomega/gexec" - "io/ioutil" - "os" - "os/exec" - "time" -) - -var _ = Describe("GenerateReleaseNotes", func() { - var ( - compiledPath string - repo *git - upstreamRepo *git - ) - - BeforeEach(func() { - var err error - compiledPath, err = gexec.Build("generate-release-notes.go") - Expect(err).NotTo(HaveOccurred()) - - repo, err = initGitRepo() - Expect(err).NotTo(HaveOccurred()) - - upstreamRepo, err = newGitRepo() - Expect(err).NotTo(HaveOccurred()) - - err = repo.run("remote", "add", "origin", upstreamRepo.dir) - Expect(err).NotTo(HaveOccurred()) - - err = repo.write("docs/release-notes.md", stableReleaseNotes) - Expect(err).NotTo(HaveOccurred()) - - err = repo.run("add", "-A") - Expect(err).NotTo(HaveOccurred()) - - err = repo.run("commit", "-m", "init") - Expect(err).NotTo(HaveOccurred()) - - err = repo.createBranches("v1.0", "v2.0") - Expect(err).NotTo(HaveOccurred()) - - upstreamRepo.run("clone", "--bare", repo.dir, upstreamRepo.dir) - }) - - When("generating release notes for previous versions", func() { - It("create the release notes from `develop`", func() { - command := exec.Command(compiledPath, "--docs-dir", repo.dir) - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(0)) - - By("keeping all the release notes on develop") - notes, err := repo.readFileFrom("develop", "docs/release-notes.md") - 
Expect(err).NotTo(HaveOccurred()) - Expect(notes).To(Equal(stableReleaseNotes)) - - By("having only v1.0 release notes on the v1.0 branch") - notes, err = repo.readFileFrom("v1.0", "docs/release-notes.md") - Expect(err).NotTo(HaveOccurred()) - Expect(notes).To(ContainSubstring("Header!!!")) - Expect(notes).To(ContainSubstring("## v1.0.0")) - Expect(notes).To(ContainSubstring("- Feature message 1.")) - Expect(notes).ToNot(ContainSubstring("## v2.0.0")) - Expect(notes).ToNot(ContainSubstring("- Fixes message 1.")) - - By("having v2.0 an v1.0 release notes on the v2.0 branch") - notes, err = repo.readFileFrom("v2.0", "docs/release-notes.md") - Expect(err).NotTo(HaveOccurred()) - Expect(notes).To(ContainSubstring("Header!!!")) - Expect(notes).To(ContainSubstring("## v1.0.0")) - Expect(notes).To(ContainSubstring("- Feature message 1.")) - Expect(notes).To(ContainSubstring("## v2.0.0")) - Expect(notes).To(ContainSubstring("- Fixes message 1.")) - }) - }) - - When("automatically adding release notes", func() { - When("the release notes have been previously generated", func() { - It("errors with a helpful message", func() { - err := repo.write("docs/release-notes.md", stableReleaseNotes + "\n\n## v1.0.1") - Expect(err).NotTo(HaveOccurred()) - - err = repo.run("add", "-A") - Expect(err).NotTo(HaveOccurred()) - - err = repo.run("commit", "-m", "init") - Expect(err).NotTo(HaveOccurred()) - - patchNotesFile, err := ioutil.TempFile("", "") - Expect(err).NotTo(HaveOccurred()) - - err = ioutil.WriteFile(patchNotesFile.Name(), []byte(patchNotes), os.ModePerm) - Expect(err).NotTo(HaveOccurred()) - - command := exec.Command(compiledPath, - "--docs-dir", repo.dir, - "--patch-notes-path", patchNotesFile.Name(), - "--patch-versions", "1.0.1", - ) - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(1)) - Expect(session.Err).To(gbytes.Say("the version v1.0.1 already exists, cannot generate release 
notes")) - }) - }) - - It("adds the notes in the correct place based on semver", func() { - patchNotesFile, err := ioutil.TempFile("", "") - Expect(err).NotTo(HaveOccurred()) - additionalPatchNotesFile, err := ioutil.TempFile("", "") - Expect(err).NotTo(HaveOccurred()) - - err = ioutil.WriteFile(patchNotesFile.Name(), []byte(patchNotes), os.ModePerm) - Expect(err).NotTo(HaveOccurred()) - err = ioutil.WriteFile(additionalPatchNotesFile.Name(), []byte(additionalPatchNotes), os.ModePerm) - Expect(err).NotTo(HaveOccurred()) - - command := exec.Command(compiledPath, - "--docs-dir", repo.dir, - "--patch-notes-path", patchNotesFile.Name(), - "--patch-notes-path", additionalPatchNotesFile.Name(), - "--patch-versions", "1.0.1", - "--patch-versions", "2.0.1", - ) - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(0)) - - By("keeping all the release notes on develop") - notes, err := repo.readFileFrom("develop", "docs/release-notes.md") - Expect(err).NotTo(HaveOccurred()) - Expect(notes).To(ContainSubstring(expectedReleaseNotesWithPatches)) - - By("having only v1.0 release notes on the v1.0 branch") - notes, err = repo.readFileFrom("v1.0", "docs/release-notes.md") - Expect(err).NotTo(HaveOccurred()) - Expect(notes).To(ContainSubstring("Header!!!")) - Expect(notes).To(ContainSubstring("## v1.0.0")) - Expect(notes).To(ContainSubstring("- Feature message 1.")) - Expect(notes).ToNot(ContainSubstring("## v2.0.0")) - Expect(notes).ToNot(ContainSubstring("- Fixes message 1.")) - Expect(notes).To(ContainSubstring("## v1.0.1")) - Expect(notes).To(ContainSubstring("- We did it! 
All fixed!")) - Expect(notes).To(ContainSubstring("- additonal note, another thing fixed!")) - Expect(notes).To(ContainSubstring("- We just like to fix all the things")) - - By("having v2.0 an v1.0 release notes on the v2.0 branch") - notes, err = repo.readFileFrom("v2.0", "docs/release-notes.md") - Expect(err).NotTo(HaveOccurred()) - Expect(notes).To(ContainSubstring("Header!!!")) - Expect(notes).To(ContainSubstring("## v1.0.0")) - Expect(notes).To(ContainSubstring("- Feature message 1.")) - Expect(notes).To(ContainSubstring("## v2.0.0")) - Expect(notes).To(ContainSubstring("- Fixes message 1.")) - Expect(notes).To(ContainSubstring("## v1.0.1")) - Expect(notes).To(ContainSubstring("- We did it! All fixed!")) - }) - }) -}) - -const patchNotes = `### Fixes -- We did it! All fixed! -` - -const additionalPatchNotes = `- additonal note, another thing fixed! -- We just like to fix all the things -` - -var expectedReleaseNotesWithPatches = fmt.Sprintf(` -Header!!! - -## v2.0.1 -%s - -### Fixes -- We did it! All fixed! -- additonal note, another thing fixed! -- We just like to fix all the things - -## v2.0.0 - -### Fixes -- Fixes message 1. -- Fixes message 2. - -## v1.0.1 -%s - -### Fixes -- We did it! All fixed! -- additonal note, another thing fixed! -- We just like to fix all the things - -## v1.0.0 - -### Features -- Feature message 1. -- Feature message 2. -`, time.Now().Format("January 2, 2006"), time.Now().Format("January 2, 2006")) - -const stableReleaseNotes = ` -Header!!! - -## v2.0.0 - -### Fixes -- Fixes message 1. -- Fixes message 2. - -## v1.0.0 - -### Features -- Feature message 1. -- Feature message 2. -` diff --git a/ci/scripts/generate-release-notes/git_test.go b/ci/scripts/generate-release-notes/git_test.go deleted file mode 100644 index a32a84fc..00000000 --- a/ci/scripts/generate-release-notes/git_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package main_test - -import ( - "fmt" - . 
"github.com/onsi/ginkgo" - "github.com/onsi/gomega/gexec" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" -) - -type git struct { - dir string -} - -func newGitRepo() (*git, error) { - dir, err := ioutil.TempDir("", "") - if err != nil { - return nil, err - } - - repo := &git{dir: dir} - return repo, nil -} - -func initGitRepo() (*git, error) { - repo, err := newGitRepo() - if err != nil { - return nil, err - } - - err = repo.run("init", "--initial-branch", "develop") - if err != nil { - return nil, err - } - - err = repo.run("commit", "--allow-empty", "-m", "init") - if err != nil { - return nil, err - } - - return repo, nil -} - -func (g *git) write(filename string, contents string) error { - fullPath := filepath.Join(g.dir, filename) - - docsDir := filepath.Dir(fullPath) - err := os.MkdirAll(docsDir, os.ModePerm) - if err != nil { - return err - } - - return ioutil.WriteFile(fullPath, []byte(contents), os.ModePerm) -} - -func (g *git) run(args ...string) error { - command := exec.Command("git", args...) 
- command.Dir = g.dir - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - if err != nil { - return err - } - session.Wait() - errCode := session.ExitCode() - if errCode == 0 { - return nil - } - - return fmt.Errorf("could not command 'git %s' with exit code %d", strings.Join(args, " "), errCode) -} - -func (g *git) createBranches(names ...string) error { - for _, name := range names { - err := g.run("checkout", "-b", name) - if err != nil { - return err - } - err = g.run("commit", "--allow-empty", "-m", "init") - if err != nil { - return err - } - } - - return g.run("checkout", "develop") -} - -func (g *git) readFileFrom(branch string, filename string) (string, error) { - err := g.run("checkout", branch) - if err != nil { - return "", err - } - - contents, err := ioutil.ReadFile(filepath.Join(g.dir, filename)) - if err != nil { - return "", err - } - - return string(contents), nil -} diff --git a/ci/scripts/generate-release-notes/scripts_suite_test.go b/ci/scripts/generate-release-notes/scripts_suite_test.go deleted file mode 100644 index b7569f02..00000000 --- a/ci/scripts/generate-release-notes/scripts_suite_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package main_test - -import ( - "github.com/onsi/gomega/gexec" - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -func TestScripts(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Generate Release Notes Suite") -} - -var _ = AfterSuite(func(){ - gexec.CleanupBuildArtifacts() -}) diff --git a/ci/scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes.go b/ci/scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes.go deleted file mode 100644 index fa1dc6c4..00000000 --- a/ci/scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "os" -) - -func main() { - - if len(os.Args) <= 3 { - log.Fatal("You must provide a path to the release notes, path to a version table, and a version number.") - } - - releaseNotesFile := os.Args[1] - versionTableFile := os.Args[2] - version := os.Args[3] - - releaseNotes, err := ioutil.ReadFile(releaseNotesFile) - if err != nil { - log.Fatalf("Could not read release notes file: %s", err) - } - - versionSeparator := []byte("## v" + version) - separatorForTableInsertion := []byte("###") - - err = checkVersionExists(releaseNotes, versionSeparator) - if err != nil { - log.Fatalf("could not check versions exists: %s", err) - } - - versionTable, err := ioutil.ReadFile(versionTableFile) - if err != nil { - log.Fatalf("Could not read version table file: %s", err) - } - - - notesSplitOnVersionSeparator := bytes.Split(releaseNotes, versionSeparator) - - targetVersionSectionBisected := bytes.SplitN(notesSplitOnVersionSeparator[1], separatorForTableInsertion, 2) - - err = checkVersionTableExists(targetVersionSectionBisected[0]) - if err!= nil { - log.Fatalf("could not check versions table: %s", err) - } - - injectVersionTableAndImage(targetVersionSectionBisected, versionTable, version) - - addBackTargetVersionHeaderSeparator(notesSplitOnVersionSeparator, targetVersionSectionBisected) - - newContents := 
addBackVersionSeparator(notesSplitOnVersionSeparator, versionSeparator) - - err = ioutil.WriteFile(os.Args[1], []byte(newContents), os.FileMode(0644)) - if err != nil { - log.Fatalf("Could not write table to release notes: %s", err) - } - - os.Exit(0) -} - -func checkVersionExists(releaseNotes, versionSeparator []byte) error { - if bytes.Contains(releaseNotes, versionSeparator) { - return nil - } - return errors.New("the requested version is not present in the release notes") -} - -func checkVersionTableExists(headerSection []byte) error { - if bytes.Contains(headerSection, []byte("|--")) { - return errors.New("the requested version already has a table in the release notes. Remove table or try a different version") - } - return nil -} - -func injectVersionTableAndImage(targetVersionSectionBisected [][]byte, versionTable []byte, version string) { - imageReceipt := fmt.Sprintf(" The full Docker image-receipt: Download\n\n", version) - targetVersionSectionBisected[0] = []byte(string(targetVersionSectionBisected[0]) + string(versionTable) + imageReceipt) -} - -func addBackTargetVersionHeaderSeparator(notesSplitOnVersionSeparator, targetVersionSectionBisected [][]byte) { - notesSplitOnVersionSeparator[1] = bytes.Join(targetVersionSectionBisected, []byte("###")) -} - -func addBackVersionSeparator(notesSplitOnVersionSeparator [][]byte, versionSeparator []byte) []byte{ - return bytes.Join(notesSplitOnVersionSeparator, versionSeparator) -} diff --git a/ci/scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes_test.go b/ci/scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes_test.go deleted file mode 100644 index cd101cc9..00000000 --- a/ci/scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - "os/exec" - "regexp" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/onsi/gomega/gbytes" - "github.com/onsi/gomega/gexec" -) - -var _ = Describe("Insert cli versions into release notes script", func() { - var ( - compiledPath string - ) - - BeforeEach(func() { - var err error - compiledPath, err = gexec.Build("insert-cli-versions-into-release-notes.go") - Expect(err).NotTo(HaveOccurred()) - }) - - When("improper number of arguments are provided", func() { - It("gives an error saying a path to the release notes file, version table, and version number are required", func() { - command := exec.Command(compiledPath, "release-notes.md", "versionTable.md") - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(1)) - Expect(session.Err).To(gbytes.Say(regexp.QuoteMeta("You must provide a path to the release notes, path to a version table, and a version number."))) - }) - }) - - When("valid release notes, version table, and versions args are provided", func() { - var ( - releaseNotesFile *os.File - releaseNotesMD string - versionTableFile *os.File - versionTableMD string - versionArg string - ) - - BeforeEach(func() { - var err error - releaseNotesMD = `# Example Release Notes - -## v1.3.0 -Released Today - -### New Features - -- has a feature! - -### Bug Fixes -etc - -## v1.2.3 -Released in the Past - -??? "CLI Versions" - - | tool | version | | | | - |------|---------|---|---|---| - | om | v1.0.0 | | | | - | | | | | | - | | | | | | - - The full Docker image-receipt: Download - -### Bug Fixes -etc unrelated version test 999.99.9 - -## v1.1.4 -Released in History Times - -We had om at version 0.57 back then man. 
- -### Security Fixes -etc -` - releaseNotesFile, err = ioutil.TempFile("", "release-notes-*.md") - Expect(err).NotTo(HaveOccurred()) - err = ioutil.WriteFile(releaseNotesFile.Name(), []byte(releaseNotesMD), os.FileMode(0644)) - Expect(err).NotTo(HaveOccurred()) - - versionTableMD = `??? "CLI Versions" - - | tool | version | | | | - |------|---------|---|---|---| - | om | v2.0.0 | | | | - | | | | | | - | | | | | | - -` - versionTableFile, err = ioutil.TempFile("", "cli-versions-*.md") - err = ioutil.WriteFile(versionTableFile.Name(), []byte(versionTableMD), os.FileMode(0644)) - Expect(err).NotTo(HaveOccurred()) - - versionArg = "1.3.0" - - }) - - It("updates the specified version entry with the table, adds image receipt link, and exits 0", func() { - command := exec.Command(compiledPath, releaseNotesFile.Name(), versionTableFile.Name(), versionArg) - - expectedReleaseNotesMD := `# Example Release Notes - -## v1.3.0 -Released Today - -??? "CLI Versions" - - | tool | version | | | | - |------|---------|---|---|---| - | om | v2.0.0 | | | | - | | | | | | - | | | | | | - - The full Docker image-receipt: Download - -### New Features - -- has a feature! - -### Bug Fixes -etc - -## v1.2.3 -Released in the Past - -??? "CLI Versions" - - | tool | version | | | | - |------|---------|---|---|---| - | om | v1.0.0 | | | | - | | | | | | - | | | | | | - - The full Docker image-receipt: Download - -### Bug Fixes -etc unrelated version test 999.99.9 - -## v1.1.4 -Released in History Times - -We had om at version 0.57 back then man. 
- -### Security Fixes -etc -` - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - Expect(err).NotTo(HaveOccurred()) - - Eventually(session).Should(gexec.Exit(0)) - - contents, err := ioutil.ReadFile(releaseNotesFile.Name()) - Expect(err).NotTo(HaveOccurred()) - - Expect(string(contents)).To(BeEquivalentTo(expectedReleaseNotesMD)) - }) - - When("Version passed is not present in release notes", func() { - It("gives an error saying that version is not there", func() { - versionArg = "999.99.9" - command := exec.Command(compiledPath, releaseNotesFile.Name(), versionTableFile.Name(), versionArg) - - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - - - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(1)) - Expect(session.Err).To(gbytes.Say(regexp.QuoteMeta("the requested version is not present in the release notes"))) - }) - }) - - When("Version passed already has a table", func() { - It("gives an error saying that version has a table already", func() { - versionArg = "1.2.3" - command := exec.Command(compiledPath, releaseNotesFile.Name(), versionTableFile.Name(), versionArg) - - session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) - - - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(1)) - Expect(session.Err).To(gbytes.Say(regexp.QuoteMeta("the requested version already has a table in the release notes. Remove table or try a different version"))) - }) - }) - }) - -}) diff --git a/ci/scripts/insert-cli-versions-into-release-notes/scripts_suite_test.go b/ci/scripts/insert-cli-versions-into-release-notes/scripts_suite_test.go deleted file mode 100644 index 6bc44016..00000000 --- a/ci/scripts/insert-cli-versions-into-release-notes/scripts_suite_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package main_test - -import ( - "github.com/onsi/gomega/gexec" - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -func TestScripts(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Insert CLI Versions Suite") -} - -var _ = AfterSuite(func(){ - gexec.CleanupBuildArtifacts() -}) diff --git a/ci/scripts/shellcheck-tasks.rb b/ci/scripts/shellcheck-tasks.rb deleted file mode 100755 index a1e00d42..00000000 --- a/ci/scripts/shellcheck-tasks.rb +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env ruby - -require 'English' -require 'yaml' -require 'tempfile' - -Dir[File.join(__dir__, '..', 'tasks/**/*.sh')].each do |script| - print "shellcheck #{script} - " - output = `shellcheck #{script}` - if $CHILD_STATUS.exitstatus == 0 - puts 'passed' - else - puts 'failed' - print output if ENV['VERBOSE'] == '1' - end -end - -Dir[File.join(__dir__, '..', 'tasks/**/*.yml')].each do |file| - task = YAML.load_file(file) - - if task.dig('run', 'path') == 'bash' - print "shellcheck #{file} - " - - script = Tempfile.new('script') - script.write(task.dig('run', 'args', 1)) - script.close - - output = `shellcheck -s bash #{script.path}` - if $CHILD_STATUS.exitstatus == 0 - puts 'passed' - else - puts 'failed' - print output if ENV['VERBOSE'] == '1' - end - end -end diff --git a/ci/scripts/update-ci.sh b/ci/scripts/update-ci.sh deleted file mode 100755 index 1e03e396..00000000 --- a/ci/scripts/update-ci.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -which ytt || ( - echo "This requires ytt to be installed" - exit 1 -) -which fly || ( - echo "This requires fly to be installed" - exit 1 -) - -echo "Setting CI pipeline..." - -fly -t platform-automation sp -p ci -c <(ytt -f $WORKING_DIR/../ci/) \ - --check-creds - -fly -t platform-automation sp -p python-mitigation-support -c <(ytt -f $WORKING_DIR/../python-mitigation-support/) \ - --check-creds - -echo "Setting support pipeline..." 
- -fly -t platform-automation sp -p support-pipeline -c <(ytt -f $WORKING_DIR/../opsman-support) \ - --check-creds diff --git a/ci/scripts/update-runway-ci.sh b/ci/scripts/update-runway-ci.sh deleted file mode 100755 index aaf73a7b..00000000 --- a/ci/scripts/update-runway-ci.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -which ytt || ( - echo "This requires ytt to be installed" - exit 1 -) -which fly || ( - echo "This requires fly to be installed" - exit 1 -) - -echo "Setting Docs CI pipeline on Runway..." -fly -t runway-platform-automation sp -p platform-automation-docs -c <(ytt -f "$WORKING_DIR/../docs/") \ - --check-creds diff --git a/ci/tasks/additional-nginx-rewrite-rules.yml b/ci/tasks/additional-nginx-rewrite-rules.yml deleted file mode 100644 index e4822741..00000000 --- a/ci/tasks/additional-nginx-rewrite-rules.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -platform: linux -image_resource: - type: registry-image - source: - repository: harbor-repo.vmware.com/dockerhub-proxy-cache/library/ubuntu -inputs: -- name: docs -- name: docs-platform-automation -outputs: -- name: docs -run: - path: bash - args: - - -c - - | - set -eux - - cat docs-platform-automation/ci/tasks/docs-nginx-rewrite-rules.conf >> docs/nginx/conf/redirect.conf diff --git a/ci/tasks/build-oci-image.yml b/ci/tasks/build-oci-image.yml deleted file mode 100644 index b7134a44..00000000 --- a/ci/tasks/build-oci-image.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -platform: linux -image_resource: - type: registry-image - source: - repository: vito/oci-build-task - -inputs: -- name: docs-platform-automation -outputs: -- name: image -params: - DOCKERFILE: - -run: - path: build diff --git a/ci/tasks/check-osl-reuse-validity.yml b/ci/tasks/check-osl-reuse-validity.yml deleted file mode 100644 index 880a591e..00000000 --- a/ci/tasks/check-osl-reuse-validity.yml +++ /dev/null @@ -1,32 +0,0 @@ 
-platform: linux - -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing - -inputs: -- name: rc-image-receipt-s3 -- name: osl-validated-image-receipt-s3 - -run: - path: bash - args: - - -c - - | - set -eux - grep ii rc-image-receipt-s3/image-receipt-test | awk '{print $2}' > dpglist - grep ii osl-validated-image-receipt-s3/image-receipt-* | awk '{print $2}' > dpglistold - - if ! diff dpglist dpglistold ; then - { echo "A dependency change, as shown above, will prevent OSL reuse. - If you are willing to go through the OSL process, - update the validated image input to this task, - run the NORSK pipelines, - and submit a new version for OSM validation. - Remember that this will take at least two weeks. - If not, see if you can get rid of the dependency change."; } 2> /dev/null - - exit 1 - fi diff --git a/ci/tasks/create-infrastructure/task.sh b/ci/tasks/create-infrastructure/task.sh deleted file mode 100755 index cabd504e..00000000 --- a/ci/tasks/create-infrastructure/task.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -set -eux - -terraform_path=$PWD/paving/$IAAS -deployment_path=$PWD/deployments/$DEPLOYMENT_NAME - -commit() { - cp "$terraform_path"/terraform.tfstate "$deployment_path" - pushd "$deployment_path" - git config --global user.name "platform-automation-bot" - git config --global user.email "$PLATFORM_AUTOMATION_EMAIL" - git add terraform.tfstate - - if [ -e "$terraform_path"/terraform-vars.yml ]; then - cp "$terraform_path"/terraform-vars.yml "$deployment_path" - git add terraform-vars.yml - fi - - cat > env.yml < terraform-vars.yml -else - terraform output -raw stable_config_opsmanager > terraform-vars.yml -fi \ No newline at end of file diff --git a/ci/tasks/create-infrastructure/task.yml b/ci/tasks/create-infrastructure/task.yml deleted file mode 100644 index 629a9f10..00000000 --- a/ci/tasks/create-infrastructure/task.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -platform: linux -inputs: -- 
name: docs-platform-automation -- name: deployments -- name: paving -outputs: -- name: deployments -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -run: - path: docs-platform-automation/ci/tasks/create-infrastructure/task.sh diff --git a/ci/tasks/create-pks-cluster/task.sh b/ci/tasks/create-pks-cluster/task.sh deleted file mode 100755 index 40c3ee5d..00000000 --- a/ci/tasks/create-pks-cluster/task.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -set -eux - -chmod +x pks-cli/tkgi-* -mv pks-cli/tkgi-* pks - -echo "Signing in to UAA..." -uaac target --skip-ssl-validation "https://api.pks.$DOMAIN":8443 -secret="$(om --env "$ENV_FILE" \ - credentials \ - -p pivotal-container-service \ - --credential-reference '.properties.pks_uaa_management_admin_client' --format json | jq -r .secret)" -uaac token client get admin -s "$secret" - -userExists="$(uaac user get platform-automation)" -if [[ "$userExists" == *"CF::UAA::NotFound: CF::UAA::NotFound"* ]]; then - echo "Creating the platform-automation user in UAA..." - uaac user add platform-automation --emails platform-automation@example.com -p super-secret-password - uaac member add pks.clusters.admin platform-automation -else - echo "platform-automation user is already created. Skipping..." -fi - -./pks login -a "api.pks.$DOMAIN" -u platform-automation -p super-secret-password --skip-ssl-validation -cluster="$(./pks clusters)" -if [[ "$cluster" == *"$CLUSTER_NAME"* ]]; then - echo "Cluster: $CLUSTER_NAME already exists. Done." -else - echo "Creating new pks cluster: $CLUSTER_NAME..." - ./pks create-cluster "$CLUSTER_NAME" --plan small --external-hostname example.hostname - - echo "Waiting until cluster is created (this can take up to 30 minutes to complete)..." - complete=$(./pks cluster "$CLUSTER_NAME") - while [[ "$complete" != *"succeeded"* ]]; do - echo "Cluster is still creating. Waiting for $SLEEP_INTERVAL..." 
- sleep "$SLEEP_INTERVAL" - complete=$(./pks cluster "$CLUSTER_NAME") - done - - echo "Cluster: $CLUSTER_NAME has been created." - exit 0 -fi diff --git a/ci/tasks/create-pks-cluster/task.yml b/ci/tasks/create-pks-cluster/task.yml deleted file mode 100644 index 9d3711c3..00000000 --- a/ci/tasks/create-pks-cluster/task.yml +++ /dev/null @@ -1,17 +0,0 @@ -platform: linux -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -inputs: - - name: pks-cli - - name: docs-platform-automation - - name: deployments -params: - DOMAIN: reference-gcp.gcp.platform-automation.cf-app.com - CLUSTER_NAME: test-additional-tasks-cluster - SLEEP_INTERVAL: 1m - ENV_FILE: -run: - path: docs-platform-automation/ci/tasks/create-pks-cluster/task.sh diff --git a/ci/tasks/create-secret-slack-notification.yml b/ci/tasks/create-secret-slack-notification.yml deleted file mode 100644 index 5400be27..00000000 --- a/ci/tasks/create-secret-slack-notification.yml +++ /dev/null @@ -1,18 +0,0 @@ -platform: linux -image_resource: - type: registry-image - source: - repository: concourse/buildroot - tag: curl -run: - path: bash - args: - - -c - - | - set -eu - - cat > notification-text/text < config/download-product.yml < /tmp/tf-vars.yml - popd -} - -add_terraform_env - -echo "Attempting to copy optional files to outputs..." -cp deployments/platform-automation/"$IAAS"/state/*.yml state/ || true -cp deployments/platform-automation/"$IAAS"/vars/*.yml vars/ || true -cp deployments/platform-automation/"$IAAS"/config/*.yml config/ - -echo "Interpolating configs..." 
-bosh int -l /tmp/tf-vars.yml deployments/platform-automation/"$IAAS"/config/opsman.yml > config/opsman.yml -bosh int -l /tmp/tf-vars.yml --vars-env=TF_VARS deployments/platform-automation/"$IAAS"/config/director.yml > config/director.yml -bosh int -l /tmp/tf-vars.yml --vars-env=TF_VARS deployments/platform-automation/"$IAAS"/env/env.yml > env/env.yml - -echo "Config generation nominal" diff --git a/ci/tasks/generate-config/task.yml b/ci/tasks/generate-config/task.yml deleted file mode 100644 index 734da8fe..00000000 --- a/ci/tasks/generate-config/task.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -platform: linux -inputs: -- name: deployments -- name: docs-platform-automation -outputs: -- name: config -- name: env -- name: vars -- name: state -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -params: - IAAS: -run: - path: docs-platform-automation/ci/tasks/generate-config/task.sh diff --git a/ci/tasks/generate-dpkg-list-for-OSL.yml b/ci/tasks/generate-dpkg-list-for-OSL.yml deleted file mode 100644 index d3337918..00000000 --- a/ci/tasks/generate-dpkg-list-for-OSL.yml +++ /dev/null @@ -1,12 +0,0 @@ -platform: linux -run: - path: bash - args: - - -c - - | - set -eux - dpkg -l > rc-image-receipt-s3/image-receipt-"$(cat version/version)" -inputs: -- name: version -outputs: -- name: rc-image-receipt-s3 diff --git a/ci/tasks/leftovers.yml b/ci/tasks/leftovers.yml deleted file mode 100644 index 7f113935..00000000 --- a/ci/tasks/leftovers.yml +++ /dev/null @@ -1,85 +0,0 @@ -platform: linux -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -params: - # A list of IAAS specific authentication and targeting params. - BBL_IAAS: - - # AWS - BBL_AWS_ACCESS_KEY_ID: - BBL_AWS_SECRET_ACCESS_KEY: - BBL_AWS_REGION: - - # Azure - BBL_AZURE_CLIENT_ID: - BBL_AZURE_CLIENT_SECRET: - BBL_AZURE_TENANT_ID: - BBL_AZURE_SUBSCRIPTION_ID: - - # GCP - # Note: Please pass the JSON account key. 
This task automatically makes it a file for use in `leftovers`. - BBL_GCP_SERVICE_ACCOUNT_KEY: - - # Vsphere - BBL_VSPHERE_VCENTER_IP: - BBL_VSPHERE_VCENTER_PASSWORD: - BBL_VSPHERE_VCENTER_USER: - BBL_VSPHERE_VCENTER_DC: - BBL_NSXT_MANAGER_HOST: - BBL_NSXT_USERNAME: - BBL_NSXT_PASSWORD: - - # The filter is use to match by name what resources to delete. - # It will match the string anywhere in the name, so be precise. - # For example, `ci` will match the name `ci-hello` and `hello-ci`. - FILTER: - - # The default behaviour of this task is to be non-destructive. - # This way no assets will be *accidentally* deleted before the dry-run. - # When enable with `true`, it will delete all the things matching the filter. - NO_CONFIRM: false - # When enable with `true`, it will just display the things that can be cleaned up. - DRY_RUN: true -run: - path: bash - args: - - -c - - | - - set -eu - - if [ "$BBL_IAAS" == "" ]; then - echo "BBL_IAAS is required" - exit 1 - fi - - if [ "$FILTER" == "" ]; then - echo "FILTER is required" - exit 1 - fi - - args=() - - if [ "$NO_CONFIRM" == "true" ]; then - args+=("--no-confirm") - fi - - if [ "$DRY_RUN" == "true" ]; then - args+=("--dry-run") - fi - - if [ "$BBL_GCP_SERVICE_ACCOUNT_KEY" != "" ]; then - file=$(mktemp) - echo "$BBL_GCP_SERVICE_ACCOUNT_KEY" > "$file" - export BBL_GCP_SERVICE_ACCOUNT_KEY="$file" - fi - - args+=("--filter" "$FILTER") - - set -x - - # shellcheck disable=SC2068 - leftovers ${args[@]} diff --git a/ci/tasks/make-commit/task.sh b/ci/tasks/make-commit/task.sh deleted file mode 100755 index e60faa48..00000000 --- a/ci/tasks/make-commit/task.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -git clone deployments deployments-updated -path=deployments-updated/platform-automation/"$IAAS"/state -mkdir -p "$path" -cp generated-state/state.yml "$path"/state.yml -cd deployments-updated - -git add -A -git commit -m "adding state file for $IAAS" || true diff --git a/ci/tasks/make-commit/task.yml 
b/ci/tasks/make-commit/task.yml deleted file mode 100644 index 5e04555e..00000000 --- a/ci/tasks/make-commit/task.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -platform: linux -inputs: -- name: generated-state -- name: deployments -- name: docs-platform-automation -outputs: -- name: deployments-updated -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -params: - IAAS: -run: - path: docs-platform-automation/ci/tasks/make-commit/task.sh diff --git a/ci/tasks/package-for-release/platform-automation.yml b/ci/tasks/package-for-release/platform-automation.yml deleted file mode 100644 index 568ff7b1..00000000 --- a/ci/tasks/package-for-release/platform-automation.yml +++ /dev/null @@ -1,109 +0,0 @@ ---- -platform: linux - -inputs: -- name: docs-platform-automation -- name: version -- name: platform-automation-image - optional: true -- name: vsphere-platform-automation-image - optional: true -- name: osl -- name: odp - optional: true - -outputs: -- name: packaged-product -- name: platform-automation-image -- name: vsphere-platform-automation-image - -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -run: - path: bash - args: - - -c - - | - set -eux - - # construct package name - VERSION="$(cat version/version)" - TASKS_FILE="platform-automation-tasks-$VERSION.zip" - IMAGE_FILE="platform-automation-image-$VERSION.tgz" - VSPHERE_IMAGE_FILE="vsphere-platform-automation-image-$VERSION.tar.gz" - - # create a temp working folder - mkdir file-store - - # copy tasks to working folder - mv docs-platform-automation/tasks file-store/tasks - - # pack the tasks to output - pushd file-store - zip -r "../packaged-product/$TASKS_FILE" ./ - popd - - function package_for_release { - image=$1 - path=$2 - - # tar the docker image to output - ls "${path}"/rootfs - pushd "${path}"/rootfs - rootfs_level_directories=$(ls) - popd - - pushd "${path}" - for dir in $rootfs_level_directories; do - 
echo "linking $dir ..." - ln -sf ./rootfs/"$dir" ./"$dir" - done - rm -Rf etc sys proc - cp -r ./rootfs/etc ./etc - mkdir -p ./sys - mkdir -p ./proc - - - cp ../version/version var/version - chown root:root var/version - chmod 444 var/version - popd - - cp osl/open_source_license*.txt "${path}"/usr/share/doc/open_source_licenses.txt - - GZIP=-9 tar zcf "packaged-product/${image}" --exclude="./image" -C "${path}" . - } - - # This makes sure that the final promotion build of these images - # have the same version as the images diverge in testing paths - # and come back together in the final promotion job - if [ -e platform-automation-image/var/version ] && [ -e vsphere-platform-automation-image/var/version ]; then - set +e - diff platform-automation-image/var/version vsphere-platform-automation-image/var/version - exit_code=$? - if [ $exit_code -ne 0 ]; then - echo "Versions of image inputs did not match. Make sure the image inputs have the same version." - exit 1 - fi - set -e - fi - - # This is to support building the "All" image (only) - if [ -n "$(ls -A platform-automation-image 2>/dev/null)" ]; then - package_for_release "${IMAGE_FILE}" platform-automation-image - fi - - # This is to support building the "vSphere" image (only) - if [ -n "$(ls -A vsphere-platform-automation-image 2>/dev/null)" ]; then - package_for_release "${VSPHERE_IMAGE_FILE}" vsphere-platform-automation-image - fi - - # copy OSL and ODP to output for bump pipelines - cp osl/open_source_license*.txt packaged-product/open_source_license_Platform_Automation_Toolkit_for_VMware_Tanzu_"$VERSION"_GA.txt - if [ -d odp/ ]; then - cp odp/VMware-Tanzu-platform-automation-toolkit*.tar.gz packaged-product/VMware-Tanzu-platform-automation-toolkit-"$VERSION"-ODP.tar.gz - fi - diff --git a/ci/tasks/pivnet-release/define-cli-versions.yml b/ci/tasks/pivnet-release/define-cli-versions.yml deleted file mode 100644 index b5a26999..00000000 --- a/ci/tasks/pivnet-release/define-cli-versions.yml +++ /dev/null @@ 
-1,45 +0,0 @@ ---- -platform: linux - -inputs: -- name: winfs-injector -- name: credhub-cli -- name: bosh-cli -- name: om-cli -- name: version -outputs: -- name: cli-versions-table - -run: - path: bash - args: - - -c - - | - set -eux - - AWS_VERSION="$(aws --version | awk '{ print $1 }' | cut -d '/' -f 2)" - AZURE_VERSION="$(az --version | grep azure-cli | awk '{ print $2 }' | head -n1)" - BBR_VERSION="$(bbr version | awk '{ print $3 }')" - BOSH_VERSION="$(cat bosh-cli/tag)" - CREDHUB_VERSION="$(cat credhub-cli/tag)" - GCLOUD_VERSION="$(gcloud --version | grep 'Google Cloud SDK' | awk '{ print $4 }' | head -n1)" - GOVC_VERSION="$(govc version | awk '{ print $2 }' | head -n1)" - OM_VERSION="$(om --version)" - WINFS_VERSION="$(cat winfs-injector/tag)" - - cat >> cli-versions-table/bump-image-cli-versions-"$(cat version/version)".md < ci/patch-notes/cve-patch-notes.md - if [[ -n $(git status --porcelain) ]]; then - git add -A - git commit -m "remove documented shared release notes" - fi - popd diff --git a/ci/tasks/pivnet-release/generate-platform-automation-metadata-bump.yml b/ci/tasks/pivnet-release/generate-platform-automation-metadata-bump.yml deleted file mode 100644 index 3fc50102..00000000 --- a/ci/tasks/pivnet-release/generate-platform-automation-metadata-bump.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -platform: linux -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing - -inputs: -- name: version -outputs: -- name: metadata - -run: - path: bash - args: - - -c - - | - set -eux - VERSION="$(cat version/version)" - DOC_VERSION=$(echo "$VERSION" | cut -f 1,2 -d '.' 
--output-delimiter='.') - - # TODO: change according to https://github.com/pivotal-cf/pivnet-resource/tree/master/metadata - cat >> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml < - A docker image for use with - Platform Automation for PCF Concourse Tasks, - containing all dependencies. - - file: "packaged-product/platform-automation-tasks-${VERSION}.zip" - upload_as: Concourse Tasks - description: Concourse Tasks to be used in the creation of pipelines to automate PCF. - - file: "packaged-product/VMware-Tanzu-platform-automation-toolkit-${VERSION}-ODP.tar.gz" - upload_as: ODP -${VERSION} - file_type: "Open Source License" - - file: "packaged-product/open_source_license_Platform_Automation_Toolkit_for_VMware_Tanzu_${VERSION}_GA.txt" - upload_as: OSL -${VERSION} - file_type: "Open Source License" - dependency_specifiers: - - specifier: 2.3.* - product_slug: ops-manager - - specifier: 2.4.* - product_slug: ops-manager - - specifier: 2.5.* - product_slug: ops-manager - - specifier: 2.6.* - product_slug: ops-manager - - specifier: 2.7.* - product_slug: ops-manager - - specifier: 2.8.* - product_slug: ops-manager - - specifier: 2.9.* - product_slug: ops-manager - - specifier: 2.10.* - product_slug: ops-manager - - specifier: 3.0.* - product_slug: ops-manager - upgrade_path_specifiers: - - specifier: 4.1.* - - specifier: 4.0.* - - specifier: 3.0.* - EOF - - if [[ "$DOC_VERSION" == "5.1" ]]; then - echo "Inserting upgrade path specifiers for 5.1..." - cat >> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml <> metadata/metadata.yml < - A docker image for use with - Platform Automation for PCF Concourse Tasks on all IaaSes, - containing all dependencies. 
- - file: "packaged-product/vsphere-platform-automation-image-${VERSION}.tar.gz" - upload_as: Docker Image for Concourse Tasks on vSphere - description: > - A docker image for use with - Platform Automation for PCF Concourse Tasks on vSphere, - containing all dependencies. - - file: "packaged-product/platform-automation-tasks-${VERSION}.zip" - upload_as: Concourse Tasks - description: Concourse Tasks to be used in the creation of pipelines to automate PCF. - - file: "packaged-product/VMware-Tanzu-platform-automation-toolkit-${VERSION}-ODP.tar.gz" - upload_as: ODP -${VERSION} - file_type: "Open Source License" - - file: "packaged-product/open_source_license_Platform_Automation_Toolkit_for_VMware_Tanzu_${VERSION}_GA.txt" - upload_as: OSL -${VERSION} - file_type: "Open Source License" - dependency_specifiers: - - specifier: 2.3.* - product_slug: ops-manager - - specifier: 2.4.* - product_slug: ops-manager - - specifier: 2.5.* - product_slug: ops-manager - - specifier: 2.6.* - product_slug: ops-manager - - specifier: 2.7.* - product_slug: ops-manager - - specifier: 2.8.* - product_slug: ops-manager - - specifier: 2.9.* - product_slug: ops-manager - - specifier: 2.10.* - product_slug: ops-manager - - specifier: 3.0.* - product_slug: ops-manager - upgrade_path_specifiers: - - specifier: 4.1.* - - specifier: 4.0.* - - specifier: 3.0.* - - specifier: 5.0.* - - specifier: 4.4.* - - specifier: 4.3.* - - specifier: 4.2.* - EOF \ No newline at end of file diff --git a/ci/tasks/pivnet-release/generate-release-notes.yml b/ci/tasks/pivnet-release/generate-release-notes.yml deleted file mode 100644 index b7768d4e..00000000 --- a/ci/tasks/pivnet-release/generate-release-notes.yml +++ /dev/null @@ -1,84 +0,0 @@ ---- -platform: linux -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing - -params: - GITHUB_SSH_KEY: -inputs: -- name: version -- name: binaries-table - -run: - path: bash - args: - - -c - - | - set -eux - - workspace=$PWD 
- - echo "Setting git key..." - echo "$GITHUB_SSH_KEY" > key - chmod 0600 key - export GIT_SSH_COMMAND="ssh -i $PWD/key -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - - VERSION="$(cat version/version)" - DOC_VERSION=$(echo "$VERSION" | cut -f 1,2 -d '.' --output-delimiter='.') - - git clone git@github.com:pivotal/docs-platform-automation - - echo "Checking presence of release notes..." - set +e - grep -q "^## v$VERSION$" docs-platform-automation/docs/release-notes.md - version_exists=$? - if [ "$version_exists" == "0" ]; then - echo "Release Notes already exist for $VERSION. Skipping..." - exit 0 - fi - set -e - - echo "Generating release notes for $VERSION..." - pushd docs-platform-automation/ci - go run ./scripts/generate-release-notes/generate-release-notes.go \ - --docs-dir "$workspace"/docs-platform-automation \ - --patch-notes-path "$workspace"/docs-platform-automation/ci/patch-notes/cve-patch-notes.md \ - --patch-notes-path "$workspace"/docs-platform-automation/ci/patch-notes/"$DOC_VERSION"-patch-notes.md \ - --patch-versions "$VERSION" - popd - - pushd docs-platform-automation/ci - echo "Inserting version table into release notes..." - go run ./scripts/insert-cli-versions-into-release-notes/insert-cli-versions-into-release-notes.go \ - "$workspace"/docs-platform-automation/docs/release-notes.md \ - "$workspace"/binaries-table/bump-image-cli-versions* \ - "$VERSION" - popd - - pushd docs-platform-automation - git config --global user.name "platform-automation" - git config --global user.email "platformautomation@groups.vmware.com" - git add -A - git commit -m "Add release notes table" || true - git pull -r - git push - popd - - pushd docs-platform-automation/ci - echo "Porting version table to previous branches..." 
- go run ./scripts/generate-release-notes/generate-release-notes.go \ - --docs-dir "$workspace"/docs-platform-automation - popd - - pushd docs-platform-automation - cat /dev/null > ci/patch-notes/"$DOC_VERSION"-patch-notes.md - if [[ -n $(git status --porcelain) ]]; then - git add -A - git commit -m "remove documented release notes for $DOC_VERSION" - git pull -r - git push - fi - popd diff --git a/ci/tasks/python-mitigation.yml b/ci/tasks/python-mitigation.yml deleted file mode 100644 index 5534a3d6..00000000 --- a/ci/tasks/python-mitigation.yml +++ /dev/null @@ -1,45 +0,0 @@ -platform: linux -image_resource: - type: registry-image - source: - repository: concourse/docker-image-resource -inputs: -- name: platform-automation-image -- name: version -- name: docs-platform-automation -outputs: -- name: bumped-platform-automation-image -- name: image-receipt -run: - path: bash - args: - - -c - - | - #!/bin/bash - set -eu - - # shellcheck disable=SC1091 - source /opt/resource/common.sh - start_docker "5" "5" "" "" - - image_name="internalpcfplatformautomation/platform-automation:packages" - destination="$PWD/bumped-platform-automation-image" - - docker import platform-automation-image/*.tgz "$image_name" - docker build -t "$image_name" -f docs-platform-automation/ci/dockerfiles/Dockerfile.python-mitigation docs-platform-automation/ - docker run \ - --rm \ - "$image_name" \ - dpkg -l > image-receipt/image-receipt-"$(cat version/version)" - - # https://github.com/concourse/docker-image-resource/blob/2d1bbe884942aaa8dabd7a762ea0dc431c572a27/assets/in#L80 - docker run \ - --cidfile=/tmp/container.cid \ - -v /opt/resource/print-metadata:/tmp/print-metadata \ - --entrypoint /tmp/print-metadata \ - "$image_name" > "${destination}/metadata.json" - - # https://github.com/concourse/docker-image-resource/blob/2d1bbe884942aaa8dabd7a762ea0dc431c572a27/assets/in#L86 - docker export "$(cat /tmp/container.cid)" | tar --exclude="dev/*" -xf - -C "${destination}/" - 
docker rm "$(cat /tmp/container.cid)" - docker rmi "$image_name" diff --git a/ci/tasks/rename-windows-stemcell/task.rb b/ci/tasks/rename-windows-stemcell/task.rb deleted file mode 100755 index 23135057..00000000 --- a/ci/tasks/rename-windows-stemcell/task.rb +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env ruby - -puts "getting the stemcell version..." -stemcellFile=Dir["pas-windows-stemcell-pivnet/*windows*"] -version=stemcellFile[0].scan(/\d+\.\d+/).last - -puts "renaming the stemcell file..." -file = File.basename(stemcellFile[0]).prepend("[stemcells-windows-server,#{version}]") - -puts("mv #{stemcellFile[0]} stemcell/#{file}") -system("mv #{stemcellFile[0]} stemcell/#{file}") diff --git a/ci/tasks/rename-windows-stemcell/task.yml b/ci/tasks/rename-windows-stemcell/task.yml deleted file mode 100644 index 91a8c5ea..00000000 --- a/ci/tasks/rename-windows-stemcell/task.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -platform: linux -inputs: -- name: pas-windows-stemcell-pivnet -- name: docs-platform-automation -outputs: -- name: stemcell -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -run: - path: docs-platform-automation/ci/tasks/rename-windows-stemcell/task.rb diff --git a/ci/tasks/secrets-verifier/.rspec b/ci/tasks/secrets-verifier/.rspec deleted file mode 100644 index c99d2e73..00000000 --- a/ci/tasks/secrets-verifier/.rspec +++ /dev/null @@ -1 +0,0 @@ ---require spec_helper diff --git a/ci/tasks/secrets-verifier/Gemfile b/ci/tasks/secrets-verifier/Gemfile deleted file mode 100644 index fada118a..00000000 --- a/ci/tasks/secrets-verifier/Gemfile +++ /dev/null @@ -1,7 +0,0 @@ -# frozen_string_literal: true - -source 'https://rubygems.org' - -gem 'paint' -gem 'rspec' -gem 'rubocop' diff --git a/ci/tasks/secrets-verifier/Gemfile.lock b/ci/tasks/secrets-verifier/Gemfile.lock deleted file mode 100644 index 26f57d59..00000000 --- a/ci/tasks/secrets-verifier/Gemfile.lock +++ /dev/null @@ -1,44 +0,0 @@ -GEM - remote: 
https://rubygems.org/ - specs: - ast (2.4.0) - diff-lcs (1.3) - jaro_winkler (1.5.4) - paint (2.2.0) - parallel (1.19.0) - parser (2.6.5.0) - ast (~> 2.4.0) - rainbow (3.0.0) - rspec (3.9.0) - rspec-core (~> 3.9.0) - rspec-expectations (~> 3.9.0) - rspec-mocks (~> 3.9.0) - rspec-core (3.9.0) - rspec-support (~> 3.9.0) - rspec-expectations (3.9.0) - diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.9.0) - rspec-mocks (3.9.0) - diff-lcs (>= 1.2.0, < 2.0) - rspec-support (~> 3.9.0) - rspec-support (3.9.0) - rubocop (0.76.0) - jaro_winkler (~> 1.5.1) - parallel (~> 1.10) - parser (>= 2.6) - rainbow (>= 2.2.2, < 4.0) - ruby-progressbar (~> 1.7) - unicode-display_width (>= 1.4.0, < 1.7) - ruby-progressbar (1.10.1) - unicode-display_width (1.6.0) - -PLATFORMS - ruby - -DEPENDENCIES - paint - rspec - rubocop - -BUNDLED WITH - 2.1.2 diff --git a/ci/tasks/secrets-verifier/finder.rb b/ci/tasks/secrets-verifier/finder.rb deleted file mode 100755 index 151c40e1..00000000 --- a/ci/tasks/secrets-verifier/finder.rb +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env ruby -# frozen_string_literal: true - -require 'json' -require 'yaml' -require 'paint' - -class Fly - def initialize(username:, password:, target:) - @username = username - @password = password - @target = target - end - - def login!(team: 'main') - system(%(fly -t finding-secrets login -u "#{@username}" -p "#{@password}" -c "#{@target}" -n #{team})) - end - - def teams - output = JSON.parse(`fly -t finding-secrets teams --json`) - output.map { |entry| entry['name'] } - end - - def pipelines(team:) - login!(team: team) - output = JSON.parse(`fly -t finding-secrets pipelines --json`) - output.map { |entry| entry['name'] } - end - - def jobs(pipeline:) - output = JSON.parse(`fly -t finding-secrets jobs --pipeline "#{pipeline}" --json`) - output.map { |entry| entry['name'] } - end - - def recent_build_id(job:, pipeline:) - output = JSON.parse(`fly -t finding-secrets builds --job "#{pipeline}/#{job}" --json`) - output.select { 
|entry| entry['end_time'] }.map { |entry| entry['name'] }.first - end - - def logs(job:, pipeline:, build:) - `fly -t finding-secrets watch --job "#{pipeline}/#{job}" --build "#{build}"` - end -end - -class Finder - def initialize(fly:, files:, allowlist:, skip_jobs:) - @fly = fly - @files = files - @allowlist = allowlist - @skip_jobs = skip_jobs - end - - def jobs_with_secrets - found = {} - @fly.teams.each do |team| - @fly.pipelines(team: team).each do |pipeline| - puts "Searching pipeline: #{pipeline}" - @fly.jobs(pipeline: pipeline).reject do |job| - @skip_jobs.include? ({ pipeline: pipeline, job: job }) - end.each do |job| - print "- job #{job}" - recent_build = @fly.recent_build_id(pipeline: pipeline, job: job) - unless recent_build - puts ' skipped' - next - end - - logs = @fly.logs(pipeline: pipeline, job: job, build: recent_build) - .gsub(/begin secrets-check ignore(.*?)end secrets-check ignore/m, '') - - found_secrets = secrets.select do |secret| - logs.include?(secret) - end - - puts ' checked' - - next if found_secrets.empty? - - found["#{pipeline}/#{job}/#{recent_build}"] = found_secrets - found_secrets.each do |s| - puts Paint[" * #{s}", :red] - end - end - end - end - found - end - - private - - def secrets - @secrets ||= begin - @files.map do |file| - payload = YAML.load_file(file) - unless payload.is_a?(Hash) - raise "payload is unexpected format from #{file} -- expected Hash got #{payload.class}" - end - - hash_values(payload) - end - .flatten - .select { |s| s.is_a?(String) } - .reject { |s| @allowlist.any? { |w| s.match?(w) } } - .reject { |s| s.strip.empty? } - .uniq - end - end - - def hash_values(payload) - payload.values.map do |value| - if value.is_a?(Hash) - hash_values(value) - else - value - end - end - end -end - -if $PROGRAM_NAME == __FILE__ - fly = Fly.new( - username: ENV.fetch('FLY_USERNAME'), - password: ENV.fetch('FLY_PASSWORD'), - target: ENV.fetch('FLY_TARGET') - ) - fly.login! 
- - Dir['deployments/**/*.tfvars'].each do |file| - system("cat #{file} | yj -i -cj > #{file}.json") - end - - files = Dir['deployments/**/env.yml', 'deployments/**/*.tfvars.json'] - puts "files: #{files.inspect}" - - if files.empty? - raise 'no files for evaluating secrets were found -- ensure the directories are correct' - end - - finder = Finder.new( - skip_jobs: [{ pipeline: 'ci', job: 'check-for-secrets-in-tasks' }], - fly: fly, - files: files, - allowlist: ENV.fetch('SECRET_ALLOWLIST').split("\n").map { |item| Regexp.new(item) } - ) - - unless finder.jobs_with_secrets.empty? - puts 'Check secrets' - exit 1 - end -end diff --git a/ci/tasks/secrets-verifier/spec/finder_spec.rb b/ci/tasks/secrets-verifier/spec/finder_spec.rb deleted file mode 100644 index 9b097f34..00000000 --- a/ci/tasks/secrets-verifier/spec/finder_spec.rb +++ /dev/null @@ -1,56 +0,0 @@ -# frozen_string_literal: true - -require 'spec_helper' -require 'tempfile' -require 'yaml' - -RSpec.describe 'When there are secrets in a latest build' do - it 'reports an error' do - fly = Fly.new( - username: 'username', - password: 'password', - target: 'https://example.com' - ) - - fly.login! 
- - has_secrets = Tempfile.new - has_secrets.write(YAML.dump( - 'password' => 'super-secure-password', - 'username' => 'admin', - 'something' => false, - 'region' => 'west-us-2', - 'another-region' => 'us-central1-b', - 'some-space-to-ignore' => ' ' - )) - has_secrets.close - - expect(fly).to receive(:teams).and_return(%w[team-1]) - expect(fly).to receive(:pipelines).with(team: 'team-1').and_return(%w[pipeline1 pipeline2]) - expect(fly).to receive(:jobs).with(pipeline: 'pipeline1') - .and_return(%w[job-with-secrets job-without-secrets check-for-secrets-in-tasks]) - expect(fly).to receive(:jobs).with(pipeline: 'pipeline2') - .and_return(%w[nothing]) - expect(fly).to receive(:recent_build_id).and_return(123).exactly(2).times - expect(fly).to receive(:recent_build_id).and_return(nil) - expect(fly).to receive(:logs) - .with(pipeline: 'pipeline1', job: 'job-with-secrets', build: 123) - .and_return("super-secure-password\nadmin\nwest-us-2\nus-central1-b") - expect(fly).to receive(:logs) - .with(pipeline: 'pipeline1', job: 'job-without-secrets', build: 123) - .and_return("non-secret - ### begin secrets-check ignore ### - super-secure-password - ### end secrets-check ignore ### - ") - - finder = Finder.new( - fly: fly, - files: [has_secrets], - allowlist: ['admin', /us/], - skip_jobs: [{ pipeline: 'pipeline1', job: 'check-for-secrets-in-tasks' }] - ) - - expect(finder.jobs_with_secrets).to eq('pipeline1/job-with-secrets/123' => ['super-secure-password']) - end -end diff --git a/ci/tasks/secrets-verifier/spec/fly_spec.rb b/ci/tasks/secrets-verifier/spec/fly_spec.rb deleted file mode 100644 index e2764296..00000000 --- a/ci/tasks/secrets-verifier/spec/fly_spec.rb +++ /dev/null @@ -1,73 +0,0 @@ -# frozen_string_literal: true - -require 'spec_helper' - -RSpec.describe 'fly interface' do - let(:fly) do - Fly.new( - username: 'user', - password: 'password', - target: 'https://example.com' - ) - end - - before do - expect(fly).to receive(:system).with('fly -t finding-secrets 
login -u "user" -p "password" -c "https://example.com" -n main') - end - - context '#teams' do - it 'returns a list of teams' do - fly.login! - expect(fly).to receive('`') - .with('fly -t finding-secrets teams --json') - .and_return('[{"name":"team-1"},{"name":"team-2"}]') - pipeline = fly.teams - expect(pipeline).to eq %w[team-1 team-2] - end - end - - context '#pipelines' do - it 'returns a list of pipelines' do - fly.login! - expect(fly).to receive(:system).with('fly -t finding-secrets login -u "user" -p "password" -c "https://example.com" -n team-1') - expect(fly).to receive('`') - .with('fly -t finding-secrets pipelines --json') - .and_return('[{"name":"a"},{"name":"b"},{"name":"c"}]') - pipeline = fly.pipelines(team: 'team-1') - expect(pipeline).to eq %w[a b c] - end - end - - context '#jobs' do - it 'returns a list of jobs for a pipeline' do - fly.login! - expect(fly).to receive('`') - .with('fly -t finding-secrets jobs --pipeline "a" --json') - .and_return('[{"name":"b"},{"name":"c"}]') - pipeline = fly.jobs(pipeline: 'a') - expect(pipeline).to eq %w[b c] - end - end - - context '#recent_build_id' do - it 'returns the most recent build number for a pipeline job' do - fly.login! - expect(fly).to receive('`') - .with('fly -t finding-secrets builds --job "a/b" --json') - .and_return('[{"name":123},{"name":456, "end_time":12313123123}]') - pipeline = fly.recent_build_id(pipeline: 'a', job: 'b') - expect(pipeline).to eq 456 - end - end - - context '#logs' do - it 'returns logs for build number' do - fly.login! 
- expect(fly).to receive('`') - .with('fly -t finding-secrets watch --job "a/b" --build "123"') - .and_return('returns some logs') - pipeline = fly.logs(pipeline: 'a', job: 'b', build: 123) - expect(pipeline).to eq 'returns some logs' - end - end -end diff --git a/ci/tasks/secrets-verifier/spec/spec_helper.rb b/ci/tasks/secrets-verifier/spec/spec_helper.rb deleted file mode 100644 index 013f8a78..00000000 --- a/ci/tasks/secrets-verifier/spec/spec_helper.rb +++ /dev/null @@ -1,15 +0,0 @@ -# frozen_string_literal: true - -require_relative '../finder' - -RSpec.configure do |config| - config.expect_with :rspec do |expectations| - expectations.include_chain_clauses_in_custom_matcher_descriptions = true - end - - config.mock_with :rspec do |mocks| - mocks.verify_partial_doubles = true - end - - config.shared_context_metadata_behavior = :apply_to_host_groups -end diff --git a/ci/tasks/secrets-verifier/task.yml b/ci/tasks/secrets-verifier/task.yml deleted file mode 100644 index d1b58aea..00000000 --- a/ci/tasks/secrets-verifier/task.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -platform: linux -inputs: -- name: deployments -- name: docs-platform-automation -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -run: - path: bash - args: - - -c - - | - set -eux - - pushd docs-platform-automation/ci/tasks/secrets-verifier - bundle install - popd - ./docs-platform-automation/ci/tasks/secrets-verifier/finder.rb -params: - RUBYOPT: "-E utf-8:utf-8" diff --git a/ci/tasks/show-director-config.yml b/ci/tasks/show-director-config.yml deleted file mode 100644 index 3eef649c..00000000 --- a/ci/tasks/show-director-config.yml +++ /dev/null @@ -1,14 +0,0 @@ -platform: linux -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing -inputs: - - name: generated-config -run: - path: bash - args: - - -c - - | - cat generated-config/director.yml diff --git a/ci/tasks/srp-collect-and-submit/task 
b/ci/tasks/srp-collect-and-submit/task deleted file mode 100755 index 901b6f9d..00000000 --- a/ci/tasks/srp-collect-and-submit/task +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -set -euo pipefail - -trap 'catch' ERR -catch() { - echo "An error has occurred removing SRP data" - rm -rf ./srp_data -} -PAT_VERSION+=$(cat version-v5.0/number) -BUILD_PIPELINE_NAME=$(cat concourse-build-info/build-pipeline-name) -BUILD_JOB_NAME=$(cat concourse-build-info/build-job-name) -BUILD_ID=$(cat concourse-build-info/build-id) - -#$if [ -n "${DOMAIN}" ]; then -# DOMAIN="domain='${DOMAIN}'," -#fi -# uid.obj.build.concourse(instance='opsmanager',namespace='main',pipeline='my-pipeline',job='my-job',build_id='124169698') -SRP_UID="uid.obj.build.concourse(instance='${DOMAIN}',namespace='main',pipeline='${BUILD_PIPELINE_NAME}',job='${BUILD_JOB_NAME}',build_id='${BUILD_ID}')" -echo "SRP component UID generated: $SRP_UID" - -mkdir -p srp_data/ - -echo "$SRP_UID" > srp_data/srp_uid -echo "SRP component UID stored in: srp_data/srp_uid" - -echo "${BUILD_ID}}" > srp_data/build_number -echo "Build number stored in: srp_data/build_number" - -srp config auth --client-id "$CLIENT_ID" --client-secret "$CLIENT_SECRET" -cp "$HOME/.srp/config.yml" srp_data/config.yml -echo "SRP CLI config stored in: srp_data/config.yml" -echo "SRP CLI version: $(srp --version)" - -srp provenance source \ - --scm-type git \ - --name "platform-automation" \ - --path ./docs-platform-automation-v5.0 \ - --saveto ./provenance/source.json \ - --build-number "$BUILD_ID" \ - --version "$PAT_VERSION" \ - --all-ephemeral true \ - --build-type release \ - --comp-uid "$SRP_UID" - - echo "SRP Provenance info: " - cat ./provenance/source.json - - - # submit the merged provenance to SRP -SRP_UID="$(sed 's|/|%2F|g' < ./srp_data/srp_uid)" -FULL_SRP_UID="uid.mtd.provenance_2_5.fragment(obj_uid=$SRP_UID,revision='')" -echo "" -echo "Full SRP UID that will be used for upload:" -echo "$FULL_SRP_UID" - -srp metadata submit \ - --url 
https://apigw.vmware.com/v1/s1/api/helix-beta \ - --uid "$FULL_SRP_UID" \ - --path ./provenance/source.json \ No newline at end of file diff --git a/ci/tasks/srp-collect-and-submit/task.yml b/ci/tasks/srp-collect-and-submit/task.yml deleted file mode 100644 index 137f0fe1..00000000 --- a/ci/tasks/srp-collect-and-submit/task.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -platform: linux - -image_resource: - type: registry-image - source: - repository: harbor.dhaka.cf-app.com/srp/srp-helper-task - username: ((srp-cli-registry-creds.username)) - password: ((srp-cli-registry-creds.password)) - -inputs: -- name: docs-platform-automation -- name: docs-platform-automation-v5.0 -- name: concourse-build-info -- name: version-v5.0 - -outputs: -- name: srp-data -- name: provenance - -params: - CLIENT_ID: - CLIENT_SECRET: - DOMAIN: - -run: - path: docs-platform-automation/ci/tasks/srp-collect-and-submit/task diff --git a/ci/tasks/test-and-build-om.yml b/ci/tasks/test-and-build-om.yml deleted file mode 100644 index b0580265..00000000 --- a/ci/tasks/test-and-build-om.yml +++ /dev/null @@ -1,51 +0,0 @@ -platform: linux -image_resource: - type: registry-image - source: - repository: internalpcfplatformautomation/ci - tag: testing -inputs: -- name: om -- name: docs-platform-automation -outputs: -- name: om-cli -caches: -- path: go -params: - TEST_GCP_SERVICE_ACCOUNT_KEY: ((gcp.service_account)) - TEST_GCP_PROJECT_ID: ((gcp.project_id)) - TEST_AZURE_STORAGE_ACCOUNT: ((azure.storage.account)) - TEST_AZURE_STORAGE_KEY: ((azure.storage.key)) - TEST_AZURE_CONTAINER_NAME: ((azure.storage.container)) - TEST_PIVNET_TOKEN: ((pivnet_token)) -run: - path: "/bin/bash" - args: - - "-c" - - | - set -exu - export GOPATH="$(pwd)/go" - - go install github.com/onsi/ginkgo/ginkgo@latest - - export PATH="${GOPATH}/bin:${PATH}" - - pushd "om" > /dev/null - go mod download - go install github.com/pivotal-cf/om - - CGO_ENABLED=1 ginkgo \ - -r \ - -race \ - -succinct \ - -nodes 1 \ - 
-randomizeAllSpecs \ - -randomizeSuites \ - -keepGoing \ - . - - go version - go build -o ../om-cli/om \ - --ldflags "-X main.version=$(git rev-list --format=format:'%H-%aI' --max-count=1 "$(git rev-parse HEAD)" | tail -1)" \ - main.go - popd > /dev/null diff --git a/ci/tasks/test-docker-import/task.yml b/ci/tasks/test-docker-import/task.yml deleted file mode 100644 index 446a32e8..00000000 --- a/ci/tasks/test-docker-import/task.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -platform: linux -image_resource: - type: registry-image - source: - repository: concourse/docker-image-resource - -inputs: - - name: packaged-product - -params: - PRODUCT_PATH: platform-automation-image-*.tgz - -run: - path: bash - args: - - -c - - | - - set -eu - - # shellcheck disable=SC1091 - source /opt/resource/common.sh - start_docker "5" "5" "" "" - - set -eux - - # shellcheck disable=SC2086 - docker import packaged-product/${PRODUCT_PATH} testing - docker run testing om --version diff --git a/ci/tasks/update-docs-ref.yml b/ci/tasks/update-docs-ref.yml deleted file mode 100644 index e89d1591..00000000 --- a/ci/tasks/update-docs-ref.yml +++ /dev/null @@ -1,30 +0,0 @@ -platform: linux -run: - path: bash - args: - - -c - - | - set -eux - - git clone deployments deployments-updated - cd deployments-updated - git submodule init - git submodule update - pushd docs-platform-automation - git checkout develop - git pull - popd - git config --global user.name "platform-automation" - git config --global user.email "platformautomation@groups.vmware.com" - git add docs-platform-automation - git commit -m 'updated docs submodule ref' || true -inputs: -- name: deployments -- name: docs-platform-automation -outputs: -- name: deployments-updated -image_resource: - type: registry-image - source: - repository: ((docker.ci-repository)) - tag: testing diff --git a/docs/.disable-verifiers.md b/docs/.disable-verifiers.md deleted file mode 100644 index d77e1ea6..00000000 --- a/docs/.disable-verifiers.md +++ /dev/null 
@@ -1,17 +0,0 @@ -!!! info - Ops Manager Verifier failures when applying changes will prevent deployment. - In cases where these verifiers are incorrectly failing for known reasons, - they should be disabled [using om][disable-verifiers]. - The `IGNORE_WARNINGS` parameter for the - `apply-changes`, `stage-configure-apply`, and `apply-director-changes` tasks - allows users to ignore all warnings from ignorable verifiers. - In an automation context, disabling _only the particular verifiers_ - where failure is well-understood allows other verifiers - to continue to provide important feedback. - Some verifiers continue to return warnings even when disabled, - preventing deployment without the `IGNORE_WARNINGS: true` param set. - If the verifiers that are preventing deployment - are known issues based on the environment setup, - then it is safe to use the flag. - -{% include ".internal_link_url.md" %} diff --git a/docs/.export_installation_note.md b/docs/.export_installation_note.md deleted file mode 100644 index 815c06cf..00000000 --- a/docs/.export_installation_note.md +++ /dev/null @@ -1,6 +0,0 @@ -!!! warning "Always Export your Installation" - It is recommended to persist the zip file exported from export-installation - to an external file store (eg S3) on a regular basis. - The exported installation can restore the Ops Manager - to a working state if it is non-functional. - diff --git a/docs/.ip-addresses.md b/docs/.ip-addresses.md deleted file mode 100644 index 132f4cf0..00000000 --- a/docs/.ip-addresses.md +++ /dev/null @@ -1,3 +0,0 @@ -!!! info - At least one IP address (public or private) must be assigned to the Ops Manager VM. - Both can be assigned, too. \ No newline at end of file diff --git a/docs/.missing_fields_opsman_director.md b/docs/.missing_fields_opsman_director.md deleted file mode 100644 index e939130a..00000000 --- a/docs/.missing_fields_opsman_director.md +++ /dev/null @@ -1,3 +0,0 @@ -!!! 
info - staged-director-config will not be able to grab all sensitive fields in your Ops Manager installation - (for example: vcenter_username and vcenter_password if using vsphere). To find these missing fields, please refer to the [Ops Manager API Documentation][opsman-api] diff --git a/docs/.opsman_filename_change_note.md b/docs/.opsman_filename_change_note.md deleted file mode 100644 index cf302b8d..00000000 --- a/docs/.opsman_filename_change_note.md +++ /dev/null @@ -1,5 +0,0 @@ -!!! warning "Ops Manager 2.5" - The filename for the artifact downloaded from Ops Manager is changed! If your resources or pipelines - have a regex for the Ops Manager filename, you **may** be affected. (Please see - Ops Manager's [official notice](https://community.pivotal.io/s/article/ops-manager-2-5-changing-the-file-naming-scheme-might-break-the-pipeline-jobs) - for more information) diff --git a/docs/TOC/concepts-index.html.md.erb b/docs/TOC/concepts-index.html.md.erb new file mode 100644 index 00000000..39a88d09 --- /dev/null +++ b/docs/TOC/concepts-index.html.md.erb @@ -0,0 +1,6 @@ +# Concepts + +* [Using a secrets store to store credentials](../concepts/secrets-handling.html) +* [Handling stemcells](../concepts/stemcell-handling.html) +* [Variables](../concepts/variables.html) +* [Recovering and upgrading Operations Manager](../concepts/upgrade.html) \ No newline at end of file diff --git a/docs/TOC/how-to-index.html.md.erb b/docs/TOC/how-to-index.html.md.erb new file mode 100644 index 00000000..c121ceff --- /dev/null +++ b/docs/TOC/how-to-index.html.md.erb @@ -0,0 +1,12 @@ +# How-to topics + +* [Writing a pipeline to install Operations Manager](../how-to-guides/installing-opsman.html) +* [Upgrading an existing Operations Manager](../how-to-guides/upgrade-existing-opsman.html) +* [Generating an Auth file](../how-to-guides/configuring-auth.html) +* [Generating an Env file](../how-to-guides/configuring-env.html) +* [Creating a director config 
file](../how-to-guides/creating-a-director-config-file.html) +* [Extending a pipeline to install a product](../how-to-guides/adding-a-product.html) +* [Why use Git and GitHub?](../how-to-guides/git-repo-layout.html) +* [Running commands locally](../how-to-guides/running-commands-locally.html) +* [Setting up S3 for file storage](../how-to-guides/setting-up-s3.html) +* [Writing a pipeline to rotate the foundation CA](../how-to-guides/rotating-certificate-authority.html) \ No newline at end of file diff --git a/docs/TOC/pipeline-design-index.html.md.erb b/docs/TOC/pipeline-design-index.html.md.erb new file mode 100644 index 00000000..17a8d5ff --- /dev/null +++ b/docs/TOC/pipeline-design-index.html.md.erb @@ -0,0 +1,3 @@ +# Pipeline design + +* [Configuration management strategies](../pipeline-design/configuration-management-strategies.html) \ No newline at end of file diff --git a/docs/TOC/reference-pipelines-index.html.md.erb b/docs/TOC/reference-pipelines-index.html.md.erb new file mode 100644 index 00000000..a1830718 --- /dev/null +++ b/docs/TOC/reference-pipelines-index.html.md.erb @@ -0,0 +1,4 @@ +# Reference pipelines + +* [Retrieving external dependencies](../pipelines/resources.html) +* [Operations Manager & multiple products](../pipelines/multiple-products.html) \ No newline at end of file diff --git a/docs/TOC/tasks-index.html.md.erb b/docs/TOC/tasks-index.html.md.erb new file mode 100644 index 00000000..5b4d0f86 --- /dev/null +++ b/docs/TOC/tasks-index.html.md.erb @@ -0,0 +1,4 @@ +# Tasks + +* [Task reference](../tasks.html) +* [Task inputs and outputs](../inputs-outputs.html) \ No newline at end of file diff --git a/docs/.cf-partial-config-domain-interpolated.md b/docs/_cf-partial-config-domain-interpolated.html.md.erb similarity index 100% rename from docs/.cf-partial-config-domain-interpolated.md rename to docs/_cf-partial-config-domain-interpolated.html.md.erb diff --git a/docs/.cf-partial-config.md b/docs/_cf-partial-config.html.md.erb similarity index 
92% rename from docs/.cf-partial-config.md rename to docs/_cf-partial-config.html.md.erb index ed4e995b..04e25a64 100644 --- a/docs/.cf-partial-config.md +++ b/docs/_cf-partial-config.html.md.erb @@ -1,6 +1,6 @@ ```yaml # base.yml -# An incomplete yaml response from om staged-config +# An incomplete YAML response from om staged-config product-name: cf product-properties: diff --git a/docs/_disable-verifiers.html.md.erb b/docs/_disable-verifiers.html.md.erb new file mode 100644 index 00000000..e8ee6941 --- /dev/null +++ b/docs/_disable-verifiers.html.md.erb @@ -0,0 +1,17 @@ +

+Tanzu Operations Manager Verifier failures when applying changes will prevent deployment. +In cases where these verifiers are incorrectly failing for known reasons, +they should be disabled using om. +The IGNORE_WARNINGS parameter for the +apply-changes, stage-configure-apply, and apply-director-changes tasks +allows users to ignore all warnings from ignorable verifiers. +In an automation context, disabling only the particular verifiers +where failure is well-understood allows other verifiers +to continue to provide important feedback. +Some verifiers continue to return warnings even when disabled, +preventing deployment without the IGNORE_WARNINGS: true param set. +If the verifiers that are preventing deployment +are known issues based on the environment setup, +then it is safe to use the flag.

+ +[//]: # ({% include ".internal_link_url.md" %}) diff --git a/docs/.docker-import-director.md b/docs/_docker-import-director.html.md.erb similarity index 100% rename from docs/.docker-import-director.md rename to docs/_docker-import-director.html.md.erb diff --git a/docs/_export_installation_note.html.md.erb b/docs/_export_installation_note.html.md.erb new file mode 100644 index 00000000..e38d48c3 --- /dev/null +++ b/docs/_export_installation_note.html.md.erb @@ -0,0 +1,5 @@ +

+VMware recommends persisting the zip file exported from export-installation +to an external file store (for example, S3) on a regular basis. +The exported installation can restore the Tanzu Operations Manager +to a working state if it becomes non-functional.

diff --git a/docs/.external_link_url.md b/docs/_external_link_url.html.md.erb similarity index 100% rename from docs/.external_link_url.md rename to docs/_external_link_url.html.md.erb diff --git a/docs/.internal_link_url.md b/docs/_internal_link_url.html.md.erb similarity index 92% rename from docs/.internal_link_url.md rename to docs/_internal_link_url.html.md.erb index 492e52a8..87ed6127 100644 --- a/docs/.internal_link_url.md +++ b/docs/_internal_link_url.html.md.erb @@ -1,4 +1,4 @@ - +[activate-certificate-authority]: {{ path }}tasks.md#activate-certificate-authority [advanced-pipeline-design]: {{ path }}pipeline-design/configuration-management-strategies.md#advanced-pipeline-design [apply-changes]: {{ path }}tasks.md#apply-changes [apply-director-changes]: {{ path }}tasks.md#apply-director-changes @@ -14,12 +14,14 @@ [configure-authentication]: {{ path }}tasks.md#configure-authentication [configure-director]: {{ path }}tasks.md#configure-director [configure-ldap-authentication]: {{ path }}tasks.md#configure-ldap-authentication +[configure-new-certificate-authority]: {{ path }}tasks.md#configure-new-certificate-authority [configure-opsman]: {{ path }}tasks.md#configure-opsman [configure-product]: {{ path }}tasks.md#configure-product [configure-saml-authentication]: {{ path }}tasks.md#configure-saml-authentication [create-vm]: {{ path }}tasks.md#create-vm [creating-resources-for-your-ops-manager]: {{ path }}how-to-guides/installing-opsman.md#creating-resources-for-your-ops-manager [credhub-interpolate]: {{ path }}tasks.md#credhub-interpolate +[delete-certificate-authority]: {{ path }}tasks.md#delete-certificate-authority [delete-vm]: {{ path }}tasks.md#delete-vm [director-configuration]: {{ path }}how-to-guides/creating-a-director-config-file.md [disable-verifiers]: {{ path }}how-to-guides/running-commands-locally.md#disable-verifiers @@ -32,6 +34,7 @@ [expiring-certificates]: {{ path }}tasks.md#expiring-certificates [export-installation]: {{ path 
}}tasks.md#export-installation [fly-download-image]: {{ path }}img/concourse-fly-download.png +[generate-certificate]: {{ path }}tasks.md#generate-certificate [generating-an-auth-file]: {{ path }}how-to-guides/configuring-auth.md#generating-an-auth-file [generating-env-file]: {{ path }}how-to-guides/configuring-env.md#generating-an-env-file [getting-started]: {{ path }}getting-started.md @@ -59,8 +62,10 @@ [product-configuration]: {{ path }}how-to-guides/adding-a-product.md [reference-pipeline]: {{ path }}pipelines/multiple-products.md [reference-resources]: {{ path }}pipelines/resources.md +[regenerate-certificates]: {{ path }}tasks.md#regenerate-certificates [replicate-product]: {{ path }}tasks.md#replicate-product [revert-staged-changes]: {{ path }}tasks.md#revert-staged-changes +[rotating-certificate-authority]: {{ path }}how-to-guides/rotating-certificate-authority.md [run-bosh-errand]: {{ path }}tasks.md#run-bosh-errand [running-commands-locally]: {{ path }}how-to-guides/running-commands-locally.md [secrets-handling]: {{ path }}concepts/secrets-handling.md diff --git a/docs/_ip-addresses.md.html.md.erb b/docs/_ip-addresses.md.html.md.erb new file mode 100644 index 00000000..5f35c07f --- /dev/null +++ b/docs/_ip-addresses.md.html.md.erb @@ -0,0 +1,3 @@ +

+At least one IP address (public or private) must be assigned to the Tanzu Operations Manager VM. +Both can be assigned, if required.

diff --git a/docs/_missing_fields_opsman_director.html.md.erb b/docs/_missing_fields_opsman_director.html.md.erb new file mode 100644 index 00000000..23861086 --- /dev/null +++ b/docs/_missing_fields_opsman_director.html.md.erb @@ -0,0 +1,4 @@ +

+staged-director-config will not be able to retrieve all sensitive fields in your Tanzu Operations Manager installation +(for example: vcenter_username and vcenter_password if using vSphere). To find these missing fields, see the +Tanzu Operations Manager API Documentation.

diff --git a/docs/compatibility-and-versioning.html.md.erb b/docs/compatibility-and-versioning.html.md.erb new file mode 100644 index 00000000..e0d2559e --- /dev/null +++ b/docs/compatibility-and-versioning.html.md.erb @@ -0,0 +1,138 @@ +# Compatibility and versioning + +This topic describes Platform Automation Toolkit dependencies and semantic versioning. + +## External dependencies + +Platform Automation Toolkit is designed to work with these dependencies. + + + + + + + + + + + + + + + + + + + + + + + + +
Platform Automation ToolkitConcourseTanzu Operations ManagerBroadcom Support Portal
v5.1.2+v6.7.9+2v2.9+v0.31.15
v5.1.0v5.0.0+1v2.9+v0.31.15
+ +1 + [`prepare-tasks-with-secrets`](./tasks.html#prepare-tasks-with-secrets) replaces [`credhub-interpolate`](./tasks.html#credhub-interpolate) in Concourse 5.x+ _only_. + If using Concourse 4.x, continue using `credhub-interpolate`. + If using Concourse 5.x+, it is strongly recommended that you switch to `prepare-tasks-with-secrets`. + For more information about secrets handling, see [Secrets Handling](./concepts/secrets-handling.html). + +2 + v4.4.32, v5.0.25, and v5.1.2 introduced a version of the Concourse image based on Ubuntu Jammy. + Because of fundamental issues with the cgroup-to-cgroupv2 transition that happened between bionic and jammy, this requires changes to Concourse that are only available in Concourse v6.7.9+. + If you are using a version of Concourse prior to v6.7.9, you must use the Ubuntu Bionic-based image. + + +## Semantic versioning +This product uses [semantic versioning](https://semver.org/) 2.0.0 +to describe the impact of changes to the Concourse tasks. In order to take advantage of semantic versioning, you must declare an API. + +The following are considered part of the API: + +- The Concourse tasks: + + - inputs and outputs (including the format/required information in config files) + - specified parameters + - intended and specified functionality + + These are all documented for each task in the task files themselves. + +- The minimum compatible versions + of Concourse and Tanzu Operations Manager + are part of the API, + and are specified in the table shown earlier on this page. 
+ +- The binaries on the _combined image_, which include: + + - bash + - build-essential + - curl + - gettext + - git + - netcat-openbsd + - python3-pip + - python3-setuptools + - rsync + - ssh + - unzip + - zip + - gcloud + - python-openstackclient + - awscli + - azure-cli + - bbr-cli + - bosh-cli + - credhub-cli + - govc + - isolation-segment-replicator + - om + - p-automator + - winfs-injector + +- The patterns necessary to specify the files on Tanzu Network: + It is considered a breaking change + if any of the following glob patterns for the Platform Automation Toolkit image and tasks + fail to return a single match + when used with the `pivnet-resource` and/or `download-product` task: +
+    platform-automation-image-*.tgz             # all IaaSes image
+    vsphere-platform-automation-image-*.tar.gz  # vSphere only image
+    platform-automation-tasks-*.zip             # tasks
+
+ +The following are NOT covered: + +- the `om` command line tool +- the `p-automator` command line tool +- the dependencies on the image intended to be used with the tasks +- non-specified parameters (for instance, any env var used by a CLI you call, + but not specified as a parameter on the task) +- properties specific to particular product or Tanzu Operations Manager versions in config files + (which are governed by the product being configured, not our tooling) +- Versions of the included binaries. + The _presence_ of those binaries is guaranteed, but the _versions_ are not. + +In general, if we make any change +that we anticipate could not be consumed without manual changes, +we consider it a breaking change, and increment the major version accordingly. + +This assumes that the required image can be made automatically available; +each version of our tasks is designed for and tested with +_only_ the version of the image that shipped with it. + +If we accidentally violate our semver, +we will publish an additional version addressing the problem. +In some cases, that may mean releasing the same software with a corrected version, +and shipping a new patch version identical to the version prior to the violation. +In others, it may mean releasing an additional patch version +which reverts an unintentional breaking change. + +This should make it safe to automatically consume our release. +It should be very safe to automatically update to patch releases. +Minor versions should be safe, +but it can be more difficult to anticipate the effect of new features, so this is slightly riskier. +Major versions should be expected to break +for at least some users when consumed automatically. +Automatic consumption of major versions should be limited +to test and staging environments +intended to endure and detect such breakage. 
diff --git a/docs/compatibility-and-versioning.md b/docs/compatibility-and-versioning.md deleted file mode 100644 index 22c67f9c..00000000 --- a/docs/compatibility-and-versioning.md +++ /dev/null @@ -1,168 +0,0 @@ -This topic describes Platform Automation Toolkit dependencies and semantic versioning. - -## External Dependencies -Platform Automation Toolkit is designed to work with these dependencies. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Platform Automation ToolkitConcourseOps ManagerPivnet Resource
v5.1.0v5.0.0+1v2.3+v0.31.15
v5.0.0v5.0.0+1v2.3+v0.31.15
v4.3.0v4.0.0+1v2.3+v0.31.15
v4.2.0v4.0.0+v2.3+v0.31.15
v4.1.0v4.0.0+v2.3+v0.31.15
v4.0.0v4.0.0+v2.3+v0.31.15
- -1 - [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] replaces [`credhub-interpolate`][credhub-interpolate] in Concourse 5.x+ _only_. - If using Concourse 4.x, continue using `credhub-interpolate`. - If using Concourse 5.x+, it is strongly recommended to switch to `prepare-tasks-with-secrets`. - For more information about secrets handling, reference the [Secrets Handling Page][secrets-handling]. - -{% include "./.opsman_filename_change_note.md" %} - -## Semantic Versioning -This product uses [semantic versioning][semver] 2.0.0 -to describe the impact of changes to our concourse tasks. In order to take advantage of semantic versioning, we must declare an API. - -The following are considered part of our API: - -- Our concourse tasks': - - - inputs and outputs (including the format/required information in config files) - - specified parameters - - intended and specified functionality - - These are all documented for each task within the task files themselves. - -- The minimum compatible version - of Concourse and Ops Manager - are part of the API, - and are specified [here][external-deps]. 
- -- The presence of the following binaries on the _combined image_: - - - bash - - build-essential - - curl - - gettext - - git - - netcat-openbsd - - python3-pip - - python3-setuptools - - rsync - - ssh - - unzip - - zip - - gcloud - - python-openstackclient - - awscli - - azure-cli - - bbr-cli - - bosh-cli - - credhub-cli - - govc - - isolation-segment-replicator - - om - - p-automator - - winfs-injector - -- The patterns necessary to specify our files on Tanzu Network: - We will consider it a breaking change - if any of the following glob patterns for the Platform Automation Toolkit image and tasks - fail to return a single match - when used with the `pivnet-resource` and/or `download-product` task: - - `platform-automation-image-*.tgz` # all IaaSes image - - `vsphere-platform-automation-image-*.tar.gz` # vSphere only image - - `platform-automation-tasks-*.zip` # tasks - - -The following are NOT covered: - -- the `om` command line tool -- the `p-automator` command line tool -- the dependencies on the image intended to be used with our tasks -- non-specified parameters (for instance, any env var used by a CLI we call - but not specified as a parameter on the task) -- properties specific to particular product or ops manager versions in config files - (which are governed by the product being configured, not our tooling) -- Versions of the included binaries. - The _presence_ of those binaries are guaranteed, but the _versions_ are not. - -In general, if we make any change -that we anticipate could not be consumed without manual changes, -we consider it a breaking change, and increment the major version accordingly. - -This assumes that the required image can be made automatically available; -each version of our tasks is designed for and tested with -_only_ the version of the image that shipped with it. - -If we accidentally violate our semver, -we will publish an additional version addressing the problem. 
-In some cases, that may mean releasing the same software with a corrected version, -and shipping a new patch version identical to the version prior to the violation. -In others, it may mean releasing an additional patch version -which reverts an unintentional breaking change. - -This should make it safe to automatically consume our release. -Patch releases should be very safe to automatically update to. -Minor versions should be safe, -but it can be more difficult to anticipate the effect of new features, -so this is slightly riskier. -Major versions should be expected to break -for at least some users when consumed automatically. -Automatic consumption of major versions should be limited -to test/staging environments -intended to endure and detect such breakage. - - -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} - -[semver]: https://semver.org -[external-deps]: #external-dependencies diff --git a/docs/concepts/secrets-handling.md b/docs/concepts/secrets-handling.md deleted file mode 100644 index 6e2c5672..00000000 --- a/docs/concepts/secrets-handling.md +++ /dev/null @@ -1,585 +0,0 @@ -## Using a Secrets Store to Store Credentials -Secrets stores, such as Credhub, can be used to store secure properties that you don't want committed into a config file. -Within your pipeline, the config file can then reference that secrets store value for runtime evaluation. - -Platform Automation Toolkit Tasks contains two tasks to help with retrieving these credentials in the tasks that use them: - -1. If you're using Concourse version 5 or newer - the [`prepare-tasks-with-secrets`](#using-prepare-tasks-with-secrets) task can be used with any Concourse supported [secrets store][concourse-secrets-handling]. -2. The [`credhub-interpolate`](#using-credhub-interpolate) task can only be used with Credhub. 
- -### Using prepare-tasks-with-secrets -The [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task takes a set of tasks -and modifies them to include environment variables referencing the variables found in the provided config files. -This allows use of the native [Concourse secrets handling][concourse-secrets-handling] -and provides support for any secret store Concourse supports. - -The [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task -replaces the [credhub-interpolate][credhub-interpolate] task on Concourse versions 5.x+ -and provides the following benefits: - -* Support for all native Concourse secrets stores including Credhub and Vault. -* Credhub credentials are no longer required by the task so they can be completely handled by concourse. -* Secrets are no longer written to disk which alleviates some security concerns. - -The [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task can be used two ways: - -* Adding to a pipeline without an already implemented credhub-interpolate task -* [Replacing an already implemented credhub-interpolate task](#replacing-credhub-interpolate-with-prepare-tasks-with-secrets) - -!!! info "All Variables Must Exist" - If using `prepare-tasks-with-secrets`, _all secrets_ must exist in either a secrets store - or a vars file found under `VARS_PATHS`. - If a vars from a config file can't be found in credhub, - it must be available in a yaml file found under `VARS_PATHS` in `prepare-tasks-with-secrets`. - This will prevent those credentials from being added as environment variables to the task - resulting in Concourse being unable to find them in the secrets store. - -To understand how `prepare-tasks-with-secrets` modifies the Platform Automation Toolkit tasks, -below is an example of how a task will be changed: - -1. Authenticate with your credhub instance. -2. 
Generate a username and password: - ```bash - credhub generate --name="/concourse/:team_name/:pipeline_name/vcenter_login" --type=user --username=some-user - ``` -3. Create a director configuration file that references the properties - using the om interpolation syntax: - - ```yaml - properties-configuration: - iaas_configuration: - vcenter_host: ((vcenter_host)) - vcenter_username: ((vcenter_login.username)) - vcenter_password: ((vcenter_login.password)) - ``` - -4. (Optional) Create vars files with additional variables not stored in the secrets store. - - We recommend this only for non-secret variables. - It's more secure to store secrets in the secrets store. - If using multiple foundations, there are some cases where - a foundation-specific key might not be sensitive, - but should be extracted to allow reuse of the config file between foundations. - If using a single config file for multiple foundations, - vars files may be used instead of storing those variables in a secrets store. - - For example: - - ```yaml - vcenter_host: vcenter.example.com - ``` - -5. Configure your pipeline to use the [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task. - - * The `config` input is required and is a directory that contains your configuration file from (3). - * The `tasks` input is required and is the set of tasks that will be modified. - * The `vars` input and `VARS_PATHS` param are _only_ required - if vars files are being used in subsequent tasks - * The `output_mapping` section is required and is where the modified tasks will be. 
- - The declaration within a pipeline might look like: - - ```yaml - - task: prepare-tasks-with-secrets - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - image: platform-automation-image - input_mapping: - tasks: platform-automation-tasks - config: deployments - vars: deployments # required only if using vars - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: ((foundation))/config - VARS_PATHS: ((foundation))/vars # required only if using vars - ``` - - !!! info - Unlike with [`credhub-interpolate`][credhub-interpolate], there is no concept of `SKIP_MISSING`. - As such, if there are credentials that will be filled in future jobs by vars files, - those vars files must be provided in the `vars` input and the `VARS_PATHS` param. - - This task will replace all of the tasks provided in the `tasks` input with the modified tasks. - The modified tasks include an extended `params` section with the secret references detected from the config files. - -6. Use the modified tasks in future jobs. - -Here's an example of what `prepare-tasks-with-secrets` is doing internally. -Given an original task and the previously provided config/vars files: - -```yaml -# Original Platform Automation Toolkit Task -platform: linux - -inputs: -- name: platform-automation-tasks -- name: config # contains the director configuration file -- name: env # contains the env file with target OpsMan Information -- name: vars # variable files to be made available - optional: true -- name: secrets - # secret files to be made available - # separate from vars, so they can be store securely - optional: true -- name: ops-files # operations files to custom configure the product - optional: true - -params: - VARS_FILES: - # - Optional - # - Filepath to the Ops Manager vars yaml file - # - The path is relative to root of the task build, - # so `vars` and `secrets` can be used. 
- - OPS_FILES: - # - Optional - # - Filepath to the Ops Manager operations yaml files - # - The path is relative to root of the task build - - ENV_FILE: env.yml - # - Required - # - Filepath of the env config YAML - # - The path is relative to root of the `env` input - - DIRECTOR_CONFIG_FILE: director.yml - # - Required - # - Filepath to the director configuration yaml file - # - The path is relative to the root of the `config` input - -run: - path: platform-automation-tasks/tasks/configure-director.sh -``` - -The `prepare-tasks-with-secrets` task will modify the original task -to have the variables found in `director.yml` embedded in the `params` section. -Any variables found in the `vars.yml` file will not be included in the modified task. -The `params` added will have a prefix of `OM_VAR`, so there are no collisions. -The task is a programmatically modified YAML file, so the output loses the comments and keys are sorted. - -```yaml -# prepare-job-with-secrets Generated Task -inputs: -- name: platform-automation-tasks -- name: config -- name: env -- name: vars - optional: true -- name: secrets - optional: true -- name: ops-files - optional: true - -params: - DIRECTOR_CONFIG_FILE: director.yml - ENV_FILE: env.yml - OM_VAR_vcenter_password: ((vcenter_password)) - OM_VARS_ENV: OM_VAR - OPS_FILES: - VARS_FILES: - -platform: linux - -run: - path: platform-automation-tasks/tasks/configure-director.sh -``` - -#### Replacing credhub-interpolate with prepare-tasks-with-secrets -If you already have implemented the [`credhub-interpolate`][credhub-interpolate] task within your pipeline, -this solution should be a _drop in replacement_ if you are not using vars files. -Note this is only a replacement if using Concourse 5.x or greater. - -If you are using vars files, the `vars` input and the `VARS_PATHS` param will also need to be set on the `prepare-tasks-with-secrets` task. 
- -For example, if the existing `credhub-interpolate` task looks like this: - ----excerpt--- "examples/credhub-interpolate-usage" - -In the task definition (above), you've had to define the prefix and Credhub authorization credentials. -The new `prepare-tasks-with-secrets` task uses concourse's native integration with Credhub (and other credential managers). -The above definition can be replaced with the following: - -```yaml -- task: prepare-tasks-with-secrets - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - config: configuration - vars: configuration # required only if using vars - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: ((foundation))/config - VARS_PATHS: ((foundation))/vars # required only if using vars -``` - -!!! info "If Using Vars Files" - If using vars files in subsequent tasks, the `vars` input and the `VARS_PATHS` param must be used to prevent - interpolation errors in those subsequent tasks. - -Notice in the above: - -* The `output_mapping`, which is required. - This will replace all `platform-automation-tasks` with the tasks that we have modified. - The modification now includes an extended `params` that now includes the secret references detected from the config files. -* The `INTERPOLATION_PATHS` is now `CONFIG_PATHS`. - The concept of reading the references from the config files is still here, - but no interpolation actually happens. -* The `PREFIX` is no longer defined or provided. - Since the tasks are using concourse's native credential management, the lookup path is predetermined. - For example, `/concourse/:team_name/:cred_name` or `/concourse/:team_name/:pipeline_name/:cred_name`. - -### Using credhub-interpolate -The [credhub-interpolate][credhub-interpolate] task can only be used with Credhub. 
- -**If using Concourse 5.x+, It is recommended to use the [prepare-tasks-with-secrets][prepare-tasks-with-secrets] task instead.** - -An example workflow would be storing an SSH key. - -1. Authenticate with your credhub instance. -2. Generate an ssh key: `credhub generate --name="/concourse/:team_name/:pipeline_name/opsman_ssh_key" --type=ssh` -3. Create an [Ops Manager configuration][opsman-config] file that references the name of the property. - -```yaml -opsman-configuration: - azure: - ssh_public_key: ((opsman_ssh_key.public_key)) -``` - -4. Configure your pipeline to use the [credhub-interpolate][credhub-interpolate] task. - It takes an input called `files`, which should contain your configuration file from (3). - - The declaration within a pipeline might look like: - -```yaml -jobs: -- name: example-job - plan: - - get: platform-automation-tasks - - get: platform-automation-image - - get: config - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - input_mapping: - files: config - params: - # depending on credhub configuration - # ether CA cert or secret are required - CREDHUB_CA_CERT: ((credhub_ca_cert)) - CREDHUB_SECRET: ((credhub_secret)) - - # all required - CREDHUB_CLIENT: ((credhub_client)) - CREDHUB_SERVER: ((credhub_server)) - PREFIX: /concourse/:team_name/:pipeline_name - SKIP_MISSING: true -``` - -Notice the `PREFIX` has been set to `/concourse/:team_name/:pipeline_name`, the path prefix defined for your cred in (2). -This allows the config file to have values scoped, for example, per foundation. -`params` should be filled in by the credhub created with your Concourse instance. - -!!! info - You can set the param `SKIP_MISSING:false` to enforce strict checking of - your vars files during intrpolation. This is true by default to support - credential management from multiple sources. 
For more information, see the - [Multiple Sources](#credub-interpolate-and-vars-files) section. - -This task will reach out to the deployed credhub and fill in your entry references and return an output -named `interpolated-files` that can then be read as an input to any following tasks. - -Our configuration will now look like - -```yaml -opsman-configuration: - azure: - ssh_public_key: ssh-rsa AAAAB3Nz... -``` - -!!! info - If using this you need to ensure the concourse worker can talk to credhub. - Depending on how you deployed credhub and/or the worker, - this may not be possible. - Using credhub-interpolate inverts control; - now workers need to access Credhub. - With `prepare-tasks-with-secrets` and other uses of Concourse's native integration, - the ATC retrieves secrets from Credhub and passes them to the worker. - -## Defining Multiline Certificates and Keys in Config Files -There are three ways to include certificates in the yaml files that are used by Platform Automation Toolkit tasks. - -1. Direct inclusion in yaml file - - ```yaml - # An incomplete base.yml response from om staged-config - product-name: cf - - product-properties: - .uaa.service_provider_key_credentials: - value: - cert_pem: | - -----BEGIN CERTIFICATE----- - ...... - -----END CERTIFICATE----- - private_key_pem: | - -----BEGIN RSA PRIVATE KEY----- - ...... - -----END RSA PRIVATE KEY----- - - .properties.networking_poe_ssl_certs: - value: - - - certificate: - cert_pem: | - -----BEGIN CERTIFICATE----- - ...... - -----END CERTIFICATE----- - private_key_pem: | - -----BEGIN RSA PRIVATE KEY----- - ...... - -----END RSA PRIVATE KEY----- - ``` - -1. 
Secrets Manager reference in yaml file - - ```yaml - # An incomplete base.yml - product-name: cf - - product-properties: - .uaa.service_provider_key_credentials: - value: - cert_pem: ((uaa_service_provider_key_credentials.certificate)) - private_key_pem: ((uaa_service_provider_key_credentials.private_key)) - - .properties.networking_poe_ssl_certs: - value: - - - certificate: - cert_pem: ((networking_poe_ssl_certs.certificate)) - private_key_pem: ((networking_poe_ssl_certs.private_key)) - ``` - - This example assumes the use of Credhub. - - Credhub supports a `--type=certificate` credential type - which allows you to store a certificate and private key pair under a single name. - The cert and key can be stored temporarily in local files - or can be passed directly on the command line. - - An example of the file storage method: - - ```bash - credhub set --type=certificate \ - --name=uaa_service_provider_key_credentials \ - --certificate=./cert.pem \ - --private=./private.key - ``` - -1. Using vars files - - Vars files are a mix of the two previous methods. - The cert/key is defined inline in the vars file: - - ```yaml - #vars.yml - uaa_service_provider_key_credentials_cert_pem: | - -----BEGIN CERTIFICATE----- - ...... - -----END CERTIFICATE----- - uaa_service_provider_key_credentials_private_key: | - -----BEGIN RSA PRIVATE KEY----- - ...... - -----END RSA PRIVATE KEY----- - networking_poe_ssl_certs_cert_pem: | - -----BEGIN CERTIFICATE----- - ...... - -----END CERTIFICATE----- - networking_poe_ssl_certs_private_key: | - -----BEGIN RSA PRIVATE KEY----- - ...... 
- -----END RSA PRIVATE KEY----- - ``` - - and referenced as a `((parameter))` in the `base.yml` - - ```yaml - # An incomplete base.yml - product-name: cf - - product-properties: - .uaa.service_provider_key_credentials: - value: - cert_pem: ((uaa_service_provider_key_credentials_cert_pem)) - private_key_pem: ((uaa_service_provider_key_credentials_private_key)) - - .properties.networking_poe_ssl_certs: - value: - - - certificate: - cert_pem: ((networking_poe_ssl_certs_cert_pem)) - private_key_pem: ((networking_poe_ssl_certs_private_key)) - ``` - -## Storing values for Multi-foundation -### Concourse Supported Secrets Store -If you have multiple foundations, store relevant keys to that foundation in a different pipeline path, -and Concourse will read those values in appropriately. -If sharing the same `base.yml` across foundations, it is recommended to have a different pipeline per foundation. - -### Vars Files -Vars files can be used for your secrets handling. -They are **not** recommended, but are sometimes required based on your foundation setup. - -Take the example below (which only uses vars files and does not use a secrets store): - -{% include ".cf-partial-config.md" %} - -In our first foundation, we have the following `vars.yml`, optional for the [`configure-product`][configure-product] task. -```yaml -# vars.yml -cloud_controller_encrypt_key.secret: super-secret-encryption-key -cloud_controller_apps_domain: cfapps.domain.com -``` - -The `vars.yml` can then be passed to [`configure-product`][configure-product] with `base.yml` as the config file. -The `configure-product` task will then sub the `((cloud_controller_encrypt_key.secret))` and `((cloud_controller_apps_domain))` -specified in `vars.yml` and configure the product as normal. 
- -An example of how this might look in a pipeline(resources not listed): -```yaml -jobs: -- name: configure-product - plan: - - aggregate: - - get: platform-automation-image - params: - unpack: true - - get: platform-automation-tasks - params: - unpack: true - - get: configuration - - get: variable - - task: configure-product - image: platform-automation-image - file: platform-automation-tasks/tasks/configure-product.yml - input_mapping: - config: configuration - env: configuration - vars: variable - params: - CONFIG_FILE: base.yml - VARS_FILES: vars.yml - ENV_FILE: env.yml -``` - -If deploying more than one foundation, a unique `vars.yml` should be used for each foundation. - -### prepare-tasks-with-secrets and Vars Files -Both Credhub and vars files may be used together to interpolate variables into `base.yml`. -This use case is described in the [Using prepare-tasks-with-secrets](#using-prepare-tasks-with-secrets) section. - -### credub-interpolate and Vars Files -Both Credhub and vars files may be used together to interpolate variables into `base.yml`. -Using the same example from above: - -{% include ".cf-partial-config.md" %} - -We have one parametrized variable that is secret and might not want to have stored in -a plain text vars file, `((cloud_controller_encrypt_key.secret))`, but `((cloud_controller_apps_domain))` -is fine in a vars file. In order to support a `base.yml` with credentials from multiple sources (i.e. -credhub and vars files), you will need to `SKIP_MISSING: true` in the [`credhub-interpolate`][credhub-interpolate] task. -This is enabled by default by the `credhub-interpolate` task. - -The workflow would be the same as [Credhub](#concourse-supported-secrets-store), but when passing the interpolated `base.yml` as a config into the -next task, you would add in a [Vars File](#vars-files) to fill in the missing variables. 
- -An example of how this might look in a pipeline (resources not listed), assuming: - -- The `((base.yml))` above -- `((cloud_controller_encrypt_key.secret))` is stored in credhub -- `((cloud_controller_apps_domain))` is stored in `director-vars.yml` - -```yaml -jobs: -- name: example-credhub-interpolate - plan: - - get: platform-automation-tasks - - get: platform-automation-image - - get: config - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - input_mapping: - files: config - params: - # depending on credhub configuration - # ether Credhub CA cert or Credhub secret are required - CREDHUB_CA_CERT: ((credhub_ca_cert)) - CREDHUB_SECRET: ((credhub_secret)) - - # all required - CREDHUB_CLIENT: ((credhub_client)) - CREDHUB_SERVER: ((credhub_server)) - PREFIX: /concourse/:team_name/:pipeline_name - SKIP_MISSING: true -- name: example-configure-director - plan: - - get: - - task: configure-director - image: platform-automation-image - file: platform-automation-tasks/tasks/configure-director.yml - params: - VARS_FILES: vars/director-vars.yml - ENV_FILE: env/env.yml - DIRECTOR_CONFIG_FILE: config/director.yml -``` - -### credhub-interpolate and Multiple Key Lookups -When using the `credhub-interpolate` task with a Credhub in a single foundation or multi-foundation manner, -we want to avoid duplicating identical credentials -(duplication makes credential rotation harder). - -In order to have Credhub read in credentials from multiple paths -(not relative to your `PREFIX`), -you must provide the absolute path to any credentials -not in your relative path. 
- -For example, using an alternative `base.yml`: -```yaml -# An incomplete yaml response from om staged-config -product-name: cf - -product-properties: - .cloud_controller.apps_domain: - value: ((cloud_controller_apps_domain)) - .cloud_controller.encrypt_key: - value: - secret: ((/alternate_prefix/cloud_controller_encrypt_key.secret)) - .properties.security_acknowledgement: - value: X - .properties.cloud_controller_default_stack: - value: default -``` - -Let's say in our `job`, we define the prefix as "foundation1". -The parameterized values in the example above will be interpolated as follows: - -`((cloud_controller_apps_domain))` uses a relative path for Credhub. -When running `credhub-interpolate`, the task will prepend the `PREFIX`. -This value is stored in Credhub as `/foundation1/cloud_controller_apps_domain`. - -`((/alternate_prefix/cloud_controller_encrypt_key.secret))` (note the leading slash) -uses an absolute path for Credhub. -When running `credhub-interpolate`, the task will not prepend the prefix. -This value is stored in Credhub at it's absolute path `/alternate_prefix/cloud_controller_encrypt_key.secret`. - -Any value with a leading `/` slash will never use the `PREFIX` -to look up values in Credhub. -Therefore, you can have multiple key lookups in a single interpolate task. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/concepts/secrets-handling.md.html.erb b/docs/concepts/secrets-handling.md.html.erb new file mode 100644 index 00000000..9e6d4812 --- /dev/null +++ b/docs/concepts/secrets-handling.md.html.erb @@ -0,0 +1,595 @@ +# Using a secrets store to store credentials + +Secrets stores such as CredHub can be used to store secure properties that you don't want committed into a config file. +Within your pipeline, the config file can then reference that secrets store value for runtime evaluation. 
+ +Platform Automation Toolkit contains two tasks to help with retrieving these credentials in the tasks that use them: + +* If you're using Concourse version 5 or newer + the [`prepare-tasks-with-secrets`](#using-prepare-tasks-with-secrets) + task can be used with any Concourse supported [secrets store](https://concourse-ci.org/creds.html). +* The [`credhub-interpolate`](#using-credhub-interpolate) task can be used only with CredHub. + +## Using prepare-tasks-with-secrets + +The [`prepare-tasks-with-secrets`](../tasks.html#prepare-tasks-with-secrets) task takes a set of tasks +and modifies them to include environment variables that reference the variables found in the config files provided. +This allows use of the native [Concourse secrets handling](https://concourse-ci.org/creds.html) +and provides support for any secret store Concourse supports. + +The [`prepare-tasks-with-secrets`](../tasks.html#prepare-tasks-with-secrets) task +replaces the [credhub-interpolate](../tasks.html#credhub-interpolate) task on Concourse versions 5.x+ +and provides the following benefits: + +* Support for all native Concourse secrets stores including CredHub and Vault. +* CredHub credentials are no longer required by the task so they can be completely handled by Concourse. +* Secrets are no longer written to disk, which alleviates some security concerns. + +The `prepare-tasks-with-secrets` task can be used in two different ways: + +* Adding to a pipeline without an already implemented `credhub-interpolate` task +* Replacing an already implemented [credhub-interpolate task](#replacing-credhub-interpolate-with-prepare-tasks-with-secrets) + +

+When using prepare-tasks-with-secrets, all secrets must exist in either a secrets store
+or a vars file found under VARS_PATHS.
+If a var from a config file can't be found in CredHub,
+it must be available in a YAML file found under VARS_PATHS in prepare-tasks-with-secrets.
+Providing the vars file prevents these credentials from being added as environment variables to the task; if they were
+added, Concourse would be unable to find them in the secrets store.

+ +To understand how `prepare-tasks-with-secrets` modifies the Platform Automation Toolkit tasks, +the following is an example of how a task will be changed: + +1. Authenticate with your CredHub instance. +2. Generate a username and password. + + ```bash + credhub generate --name="/concourse/:team_name/:pipeline_name/vcenter_login" --type=user --username=some-user + ``` + + +3. Create a director configuration file that references the properties + using the `om` interpolation syntax. + + ```yaml + properties-configuration: + iaas_configuration: + vcenter_host: ((vcenter_host)) + vcenter_username: ((vcenter_login.username)) + vcenter_password: ((vcenter_login.password)) + ``` + +4. (Optional) Create vars files with additional variables not stored in the secrets store. + + This is recommended only for non-secret variables. + It's more secure to store secrets in the secrets store. + If using multiple foundations, there are some cases where + a foundation-specific key might not be sensitive, + but should be extracted to allow reuse of the config file between foundations. + If using a single config file for multiple foundations, + vars files may be used instead of storing the variables in a secrets store. + + For example: + + ```yaml + vcenter_host: vcenter.example.com + ``` + +5. Configure your pipeline to use the [`prepare-tasks-with-secrets`](../tasks.html#prepare-tasks-with-secrets) task. + + * The `config` input is required and is a directory that contains your configuration file from (3). + * The `tasks` input is required and is the set of tasks that will be modified. + * The `vars` input and `VARS_PATHS` param are only required + if vars files are being used in subsequent tasks. + * The `output_mapping` section is required and is where the modified tasks will be. 
+ + The declaration in a pipeline might look similar to this example: + + ```yaml + - task: prepare-tasks-with-secrets + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + image: platform-automation-image + input_mapping: + tasks: platform-automation-tasks + config: deployments + vars: deployments # required only if using vars + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: ((foundation))/config + VARS_PATHS: ((foundation))/vars # required only if using vars + ``` + +

+ Unlike with credhub-interpolate, + there is no concept of SKIP_MISSING. + As such, if there are credentials that will be filled in future jobs by vars files, + those vars files must be provided in the vars input and the VARS_PATHS param.

+ + This task will replace all of the tasks provided in the `tasks` input with the modified tasks. + The modified tasks include an extended `params` section with the secret references detected from the config files. + +6. Use the modified tasks in future jobs. + +### What the prepare-tasks-with-secrets task is doing + +Here's an example of what `prepare-tasks-with-secrets` is doing internally. +Given an original task and the previously provided config/vars files: + +```yaml +# Original Platform Automation Toolkit Task +platform: linux + +inputs: +- name: platform-automation-tasks +- name: config # contains the director configuration file +- name: env # contains the env file with target OpsMan Information +- name: vars # variable files to be made available + optional: true +- name: secrets + # secret files to be made available + # separate from vars, so they can be store securely + optional: true +- name: ops-files # operations files to custom configure the product + optional: true + +params: + VARS_FILES: + # - Optional + # - Filepath to the Tanzu Operations Manager vars YAML file + # - The path is relative to root of the task build, + # so `vars` and `secrets` can be used. + + OPS_FILES: + # - Optional + # - Filepath to the Tanzu Operations Manager operations YAML files + # - The path is relative to root of the task build + + ENV_FILE: env.yml + # - Required + # - Filepath of the env config YAML + # - The path is relative to root of the `env` input + + DIRECTOR_CONFIG_FILE: director.yml + # - Required + # - Filepath to the director configuration YAML file + # - The path is relative to the root of the `config` input + +run: + path: platform-automation-tasks/tasks/configure-director.sh +``` + +The `prepare-tasks-with-secrets` task modifies the original task +to embed the variables found in `director.yml`, in the `params` section. +Any variables found in the `vars.yml` file will not be included in the modified task. 
+The `params` added will have a prefix of `OM_VAR`, to avoid collisions. +The task is a programmatically modified YAML file, so the output loses the comments and keys are sorted. + +```yaml +# prepare-job-with-secrets Generated Task +inputs: +- name: platform-automation-tasks +- name: config +- name: env +- name: vars + optional: true +- name: secrets + optional: true +- name: ops-files + optional: true + +params: + DIRECTOR_CONFIG_FILE: director.yml + ENV_FILE: env.yml + OM_VAR_vcenter_password: ((vcenter_password)) + OM_VARS_ENV: OM_VAR + OPS_FILES: + VARS_FILES: + +platform: linux + +run: + path: platform-automation-tasks/tasks/configure-director.sh +``` + +### Replacing credhub-interpolate with prepare-tasks-with-secrets + +If you already have implemented the [`credhub-interpolate`](../tasks.html#credhub-interpolate) task in your pipeline, +this solution should be a drop-in replacement if you are not using vars files. +You must be using Concourse 5.x or greater. + +If you are using vars files, the `vars` input and the `VARS_PATHS` param will also need to be set on the `prepare-tasks-with-secrets` task. + +For example, if the existing `credhub-interpolate` task looks like this: + +<%= partial "../examples/anchors/credhub-interpolate" %> + +In this task definition, you defined the prefix and CredHub authorization credentials. +The new `prepare-tasks-with-secrets` task uses Concourse's native integration with CredHub (and other credential managers). +The task definition can be replaced with the following: + +```yaml +- task: prepare-tasks-with-secrets + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + config: configuration + vars: configuration # required only if using vars + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: ((foundation))/config + VARS_PATHS: ((foundation))/vars # required only if using vars +``` + +

+If using vars files in subsequent tasks, the vars input and the VARS_PATHS param must be used to prevent +interpolation errors in those subsequent tasks.

+ +**For the preceding, also note:** + +* The `output_mapping`, which is required. + This will replace all `platform-automation-tasks` with the modified tasks. + The modification now includes an extended `params`, which now includes the secrets references detected from the config files. +* The `INTERPOLATION_PATHS` is now `CONFIG_PATHS`. + The concept of reading the references from the config files is still here, + but no interpolation actually happens. +* The `PREFIX` is no longer defined or provided. + Since the tasks are using Concourse's native credential management, the lookup path is predetermined. + For example, `/concourse/:team_name/:cred_name` or `/concourse/:team_name/:pipeline_name/:cred_name`. + +## Using credhub-interpolate + +The [credhub-interpolate](../tasks.html#credhub-interpolate) task can only be used with CredHub. + +**If you are using Concourse 5.x+, it is recommended that you use the [prepare-tasks-with-secrets](../tasks.html#prepare-tasks-with-secrets) task instead.** + +An example workflow: Storing an SSH key + +1. Authenticate with your CredHub instance. +2. Generate an SSH key: `credhub generate --name="/concourse/:team_name/:pipeline_name/opsman_ssh_key" --type=ssh` +3. Create an [Tanzu Operations Manager configuration](../inputs-outputs.html#opsman-config) file that references the name of the property. + + ```yaml + opsman-configuration: + azure: + ssh_public_key: ((opsman_ssh_key.public_key)) + ``` + +4. Configure your pipeline to use the [credhub-interpolate](../tasks.html#credhub-interpolate) task. + It takes an input called `files`, which should contain your configuration file from the earlier step. 
+ + The declaration in a pipeline might look like this: + + ```yaml + jobs: + - name: example-job + plan: + - get: platform-automation-tasks + - get: platform-automation-image + - get: config + - task: credhub-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/credhub-interpolate.yml + input_mapping: + files: config + params: + # depending on CredHub configuration + # ether CA cert or secret are required + CREDHUB_CA_CERT: ((credhub_ca_cert)) + CREDHUB_SECRET: ((credhub_secret)) + + # all required + CREDHUB_CLIENT: ((credhub_client)) + CREDHUB_SERVER: ((credhub_server)) + PREFIX: /concourse/:team_name/:pipeline_name + SKIP_MISSING: true + ``` + + This task will reach out to the deployed CredHub and fill in your entry references and return an output + named `interpolated-files` that can then be read as an input to any tasks that follow. + + Our configuration will now look like this: + + ```yaml + opsman-configuration: + azure: + ssh_public_key: ssh-rsa AAAAB3Nz... + ``` + +

+If using this, you need to ensure that the Concourse worker can talk to CredHub. +Depending on how you deployed CredHub and/or the worker, +this may not be possible. +Using credhub-interpolate inverts control; +now workers need to access CredHub. +With prepare-tasks-with-secrets and other uses of the Concourse native integration, +the ATC retrieves secrets from CredHub and passes them to the worker.

+ +## Defining multiline certificates and keys in config files + +There are three ways to include certificates in the YAML files that are used by Platform Automation Toolkit tasks. + +1. Direct inclusion in YAML file: + + ```yaml + # An incomplete base.yml response from om staged-config + product-name: cf + + product-properties: + .uaa.service_provider_key_credentials: + value: + cert_pem: | + -----BEGIN CERTIFICATE----- + ...... + -----END CERTIFICATE----- + private_key_pem: | + -----BEGIN RSA PRIVATE KEY----- + ...... + -----END RSA PRIVATE KEY----- + + .properties.networking_poe_ssl_certs: + value: + - + certificate: + cert_pem: | + -----BEGIN CERTIFICATE----- + ...... + -----END CERTIFICATE----- + private_key_pem: | + -----BEGIN RSA PRIVATE KEY----- + ...... + -----END RSA PRIVATE KEY----- + ``` + +1. Secrets manager reference in YAML file: + + ```yaml + # An incomplete base.yml + product-name: cf + + product-properties: + .uaa.service_provider_key_credentials: + value: + cert_pem: ((uaa_service_provider_key_credentials.certificate)) + private_key_pem: ((uaa_service_provider_key_credentials.private_key)) + + .properties.networking_poe_ssl_certs: + value: + - + certificate: + cert_pem: ((networking_poe_ssl_certs.certificate)) + private_key_pem: ((networking_poe_ssl_certs.private_key)) + ``` + + This example assumes the use of CredHub. + + CredHub supports a `--type=certificate` credential type + which allows you to store a certificate and private key pair under a single name. + The cert and key can be stored temporarily in local files + or can be passed directly on the command line. + + An example of the file storage method: + + ```bash + credhub set --type=certificate \ + --name=uaa_service_provider_key_credentials \ + --certificate=./cert.pem \ + --private=./private.key + ``` + +1. Using vars files: + + Vars files are a mix of the two previous methods. + The cert/key is defined inline in the vars file. 
+ + ```yaml + #vars.yml + uaa_service_provider_key_credentials_cert_pem: | + -----BEGIN CERTIFICATE----- + ...... + -----END CERTIFICATE----- + uaa_service_provider_key_credentials_private_key: | + -----BEGIN RSA PRIVATE KEY----- + ...... + -----END RSA PRIVATE KEY----- + networking_poe_ssl_certs_cert_pem: | + -----BEGIN CERTIFICATE----- + ...... + -----END CERTIFICATE----- + networking_poe_ssl_certs_private_key: | + -----BEGIN RSA PRIVATE KEY----- + ...... + -----END RSA PRIVATE KEY----- + ``` + + It is then referenced as a `((parameter))` in the `base.yml`. + + ```yaml + # An incomplete base.yml + product-name: cf + + product-properties: + .uaa.service_provider_key_credentials: + value: + cert_pem: ((uaa_service_provider_key_credentials_cert_pem)) + private_key_pem: ((uaa_service_provider_key_credentials_private_key)) + + .properties.networking_poe_ssl_certs: + value: + - + certificate: + cert_pem: ((networking_poe_ssl_certs_cert_pem)) + private_key_pem: ((networking_poe_ssl_certs_private_key)) + ``` + +## Storing values for multi-foundation deployment + +### Concourse-supported secrets store + +If you have multiple foundations, store relevant keys to each foundation in a different pipeline path, +and Concourse reads the values in appropriately. +When sharing the same `base.yml` across foundations, it is recommended +that you have a different pipeline for each foundation. + +### Vars files +Vars files can be used for your secrets handling. +They are **not** recommended, but are sometimes required based on your foundation setup. + +Consider the following example (which only uses vars files and does not use a secrets store): + +<%= partial "cf-partial-config" %> + +The first foundation has the following `vars.yml`, optional for the [`configure-product`](../tasks.html#configure-product) task. 
+ +```yaml +# vars.yml +cloud_controller_encrypt_key.secret: super-secret-encryption-key +cloud_controller_apps_domain: cfapps.domain.com +``` + +The `vars.yml` can then be passed to [`configure-product`](../tasks.html#configure-product) with `base.yml` as the config file. +The `configure-product` task will then substitute the `((cloud_controller_encrypt_key.secret))` and `((cloud_controller_apps_domain))` +specified in `vars.yml` and configure the product as normal. + +An example of how this might look in a pipeline. (Resources are not listed in this example): + +```yaml +jobs: +- name: configure-product + plan: + - aggregate: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - get: variable + - task: configure-product + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: configuration + env: configuration + vars: variable + params: + CONFIG_FILE: base.yml + VARS_FILES: vars.yml + ENV_FILE: env.yml +``` + +If deploying more than one foundation, a unique `vars.yml` should be used for each foundation. + +### Using the tasks with vars files + +* Using `prepare-tasks-with-secrets` and vars files: CredHub and vars files may be used together to interpolate variables into `base.yml`. +This use case is described in [Using prepare-tasks-with-secrets](#using-prepare-tasks-with-secrets). + +* Using `credhub-interpolate` and vars files: CredHub and vars files may be used together to interpolate variables into `base.yml`. +Using the same example from earlier in this topic. 
+ +### Using credhub-interpolate with vars files + +Using `credhub-interpolate` and vars files together: + +<%= partial "cf-partial-config" %> + +There is one parametrized variable that is secret and you might not want to have stored in +a plain text vars file, `((cloud_controller_encrypt_key.secret))`, but `((cloud_controller_apps_domain))` +is fine in a vars file. + +To support a `base.yml` with credentials from multiple sources (that is, +CredHub and vars files), you must use `SKIP_MISSING: true` in the [`credhub-interpolate`](../tasks.html#credhub-interpolate) task. +This is enabled by default by the `credhub-interpolate` task. + +The workflow is the same as [CredHub](#concourse-supported-secrets-store), but when passing the interpolated `base.yml` as a config into the +next task, you add in a [vars file](#vars-files) to fill in the missing variables. + +An example of how this might look in a pipeline (resources not listed), assuming: + +- The `((base.yml))` above +- `((cloud_controller_encrypt_key.secret))` is stored in CredHub +- `((cloud_controller_apps_domain))` is stored in `director-vars.yml` + +```yaml +jobs: +- name: example-credhub-interpolate + plan: + - get: platform-automation-tasks + - get: platform-automation-image + - get: config + - task: credhub-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/credhub-interpolate.yml + input_mapping: + files: config + params: + # depending on CredHub configuration + # ether CredHub CA cert or CredHub secret are required + CREDHUB_CA_CERT: ((credhub_ca_cert)) + CREDHUB_SECRET: ((credhub_secret)) + + # all required + CREDHUB_CLIENT: ((credhub_client)) + CREDHUB_SERVER: ((credhub_server)) + PREFIX: /concourse/:team_name/:pipeline_name + SKIP_MISSING: true +- name: example-configure-director + plan: + - get: + - task: configure-director + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-director.yml + params: + VARS_FILES: 
vars/director-vars.yml + ENV_FILE: env/env.yml + DIRECTOR_CONFIG_FILE: config/director.yml +``` + +### credhub-interpolate and multiple key lookups + +When using the `credhub-interpolate` task with a CredHub in a single foundation or multi-foundation, +it's best to avoid duplicating identical credentials +(duplication makes credential rotation more difficult). + +To have CredHub read in credentials from multiple paths +(not relative to your `PREFIX`), +you must provide the absolute path to any credentials +not in your relative path. + +For example, using an alternative `base.yml`: + +```yaml +# An incomplete YAML response from om staged-config +product-name: cf + +product-properties: + .cloud_controller.apps_domain: + value: ((cloud_controller_apps_domain)) + .cloud_controller.encrypt_key: + value: + secret: ((/alternate_prefix/cloud_controller_encrypt_key.secret)) + .properties.security_acknowledgement: + value: X + .properties.cloud_controller_default_stack: + value: default +``` + +Suppose there is an example `job` in which the prefix is defined as "foundation1." +The parameterized values in the previous example are interpolated as follows: + +* `((cloud_controller_apps_domain))` uses a relative path for CredHub. +When running `credhub-interpolate`, the task prepends the `PREFIX`. +This value is stored in CredHub as `/foundation1/cloud_controller_apps_domain`. + +* `((/alternate_prefix/cloud_controller_encrypt_key.secret))` (note the leading slash) +uses an absolute path for CredHub. +When running `credhub-interpolate`, the task does not prepend the prefix. +This value is stored in CredHub at its absolute path `/alternate_prefix/cloud_controller_encrypt_key.secret`. + +Any value with a leading `/` slash will never use the `PREFIX` +to look up values in CredHub, so +you can have multiple key lookups in a single interpolate task. 
diff --git a/docs/concepts/stemcell-handling.md b/docs/concepts/stemcell-handling.md deleted file mode 100644 index 8c0b4812..00000000 --- a/docs/concepts/stemcell-handling.md +++ /dev/null @@ -1,94 +0,0 @@ -# Stemcell Handling - -## What is Stemcell Handling? -In Ops Manager, every product uploaded and staged needs to be given a [stemcell][bosh-stemcell] in -order to operate. By default, every stemcell uploaded to Ops Manager will automatically associate -with any new or existing products. Using the automation tasks, this default can be overridden to -not have a stemcell associate with any products, and can be manually assigned as deemed necessary -by the user. - -## Why do your Stemcell Handling Manually? -Unless there is a specific need to manually handle the stemcells in Ops Manager, it is recommended -to use the default. A common use case for manual stemcell handling is updating the product stemcells -one at a time to minimize downtime during apply changes. This is particularly beneficial in environments -with large numbers of tiles that share the same stemcell. - -## How to use the Stemcell Handling Tasks in Automation -Platform Automation Toolkit has tasks that will assist in the manual handling of stemcells within -Ops Manager. These tasks, in order, are: - -- [download-product][download-product] -- [upload-product][upload-product] -- [stage-product][stage-product] -- [upload-stemcell][upload-stemcell] -- [assign-stemcell][assign-stemcell] - -1. `download-product`: - - Create a `config.yml` for this task using the [example provided][download-product-config]. - - After running the task, a file named `assign-stemcell.yml` is outputted. - The task will put a config file with two values, `product` and `stemcell` into the `assign-stemcell-config` - output directory. This can be used with [assign-stemcell][assign-stemcell] to ensure the _latest_ stemcell is - used with that product. - -2. 
Run the [upload-product][upload-product] and [stage-product][stage-product] tasks to get the - resources into Ops Manager. - -3. Run the [upload-stemcell][upload-stemcell] task. - - To upload the stemcell to Ops Manager without associating it with any product, the - [`upload-stemcell`][upload-stemcell] task will need to be executed with the `FLOATING_STEMCELL: false` - flag set. - - An example of this, in a pipeline: - -```yaml -- task: upload-stemcell - image: platform-automation-image - file: platform-automation-tasks/tasks/upload-stemcell.yml - input_mapping: - env: configuration - stemcell: downloaded-stemcell - params: - ENV_FILE: ((foundation))/env/env.yml - FLOATING_STEMCELL: false -``` - -!!! warning - `upload-stemcell` should not be run until after the `stage-product` task has completed. When the two tasks are run in the - opposite order, the stemcell will still auto-associate with the product. - - -4. Run the [assign-stemcell][assign-stemcell] task to associate the stemcell with the staged product. - If using the `download-product` task before doing this within the same job, you must assign the config - using the `input_mapping` key to assign the outputted config to the config that `assign-stemcell` is - expecting. Upon successful completion, the stemcell specified in the config will be associated with the product - specified in the config, and no other product will be associated with that stemcell. - - An example of this, in a pipeline: - -```yaml -- task: assign-stemcell - image: platform-automation-image - file: platform-automation-tasks/tasks/assign-stemcell.yml - input_mapping: - env: configuration - config: assign-stemcell-config - params: - ENV_FILE: ((foundation))/env/env.yml -``` - - -5. [Configure Product][configure-product] and [Apply Changes][apply-changes] can then be run on the -product as normal. - -## How to Download a Specific Stemcell - -Platform Automation Toolkit can be used to download a specific stemcell. 
In order to do so, create a `config.yml` for this -task using the [example provided][download-stemcell-product-config]. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/concepts/stemcell-handling.md.html.erb b/docs/concepts/stemcell-handling.md.html.erb new file mode 100644 index 00000000..86ba47f8 --- /dev/null +++ b/docs/concepts/stemcell-handling.md.html.erb @@ -0,0 +1,87 @@ +# Handling stemcells + + +In Tanzu Operations Manager, every product uploaded and staged needs to be given a [stemcell](https://bosh.io/docs/stemcell/) in +order to operate. By default, every stemcell uploaded to Tanzu Operations Manager automatically associates +with any new or existing products. Using the automation tasks, this default can be overridden to +not have a stemcell associate with any products, and can be manually assigned as you deem necessary. + +## Why do your stemcell handling manually? + +Unless there is a specific need to manually handle the stemcells in Tanzu Operations Manager, it is recommended +that you use the default. A common use case for manual stemcell handling is updating the product stemcells +one at a time to minimize downtime during Apply Changes. This is particularly beneficial in environments +with large numbers of tiles that share the same stemcell. + +## How to use the stemcell handling tasks in automation + +Platform Automation Toolkit has tasks for the manual handling of stemcells in +Tanzu Operations Manager. These tasks, in order, are: + +- [download-product](../tasks.html#download-product) +- [upload-product](../tasks.html#upload-product) +- [stage-product](../tasks.html#stage-product) +- [upload-stemcell](../tasks.html#upload-stemcell) +- [assign-stemcell](../tasks.html#assign-stemcell) + +Follow these steps: + +1. `download-product`: Create a `config.yml` for this task using the [example provided](../inputs-outputs.html#download-product-config). 
+ + After running the task, there will be a new file named `assign-stemcell.yml`. + The task put a config file containing two values, `product` and `stemcell`, into the `assign-stemcell-config` + output directory. This can be used with [assign-stemcell](../tasks.html#assign-stemcell) to ensure that the latest stemcell is + used with that product. + +2. Run the [upload-product](../tasks.html#upload-product) and [stage-product](../tasks.html#stage-product) tasks to get the + resources into Tanzu Operations Manager. + +3. Run the [upload-stemcell](../tasks.html#upload-stemcell) task. + + To upload the stemcell to Tanzu Operations Manager without associating it with a product, execute the + `upload-stemcell` task with the `FLOATING_STEMCELL: false` flag set. + + Here is an example of this, in a pipeline: + + ```yaml + - task: upload-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: configuration + stemcell: downloaded-stemcell + params: + ENV_FILE: ((foundation))/env/env.yml + FLOATING_STEMCELL: false + ``` + +

+ Do not run upload-stemcell until after the stage-product task has completed. When the two tasks are run in the + opposite order, the stemcell still auto-associates with the product.

+ +4. Run the [assign-stemcell](../tasks.html#assign-stemcell) task to associate the stemcell with the staged product. + If you are using the `download-product` task before doing this in the same job, you must assign the config + using the `input_mapping` key to assign the outputted config to the config that `assign-stemcell` is + expecting. Upon successful completion, the stemcell specified in the config will be associated with the product + specified in the config, and no other product will be associated with that stemcell. + + Here is an example of this, in a pipeline: + + ```yaml + - task: assign-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/assign-stemcell.yml + input_mapping: + env: configuration + config: assign-stemcell-config + params: + ENV_FILE: ((foundation))/env/env.yml + ``` + +5. Now you can run [configure product](../tasks.html#configure-product) and [apply changes](../tasks.html#apply-changes) on the +product as normal. + +## How to download a specific stemcell + +Platform Automation Toolkit can be used to download a specific stemcell. To do this, create a `config.yml` for this +task using the example provided in [Task inputs and outputs](../inputs-outputs.html#download-stemcell-product-config). diff --git a/docs/concepts/upgrade.md b/docs/concepts/upgrade.md deleted file mode 100644 index 36caddb6..00000000 --- a/docs/concepts/upgrade.md +++ /dev/null @@ -1,104 +0,0 @@ -This topic provides an overview -of upgrading and recovering an Ops Manager using Platform Automation Toolkit, -including common errors. 
- -{% include "./.export_installation_note.md" %} - -## Upgrading Ops Manager - -It's important to note when upgrading your Ops Manager: - -* always perform an export installation -* persist that exported installation -* installation is separate from upgrade -* an initial installation is done, which maintains state - -### Upgrade Flowchart -The [`upgrade-opsman`][upgrade-opsman] task follows the flow based on state of an Ops Manager VM. -This flowchart gives a high level overview of how the task makes decisions for an upgrade. - -{% include "./upgrade-flowchart.mmd" %} - -On successive invocations of the task, it will offer different behaviour of the previous run. -This aids in recovering from failures (ie: from an IAAS) that occur. - -## Recovering the Ops Manager VM -Using the `upgrade-opsman` task will always delete the VM. -This is done to create a consistent and simplified experience across IAASs. -For example, some IAASs have IP conflicts -if there are multiple Ops Manager VMs present. - -If there is an issue during the upgrade process, -you may need to recover your Ops Manager VM. -Recovering your VM can be done in two different ways. -Both methods require an exported installation. - -1. **Recovery using the upgrade-opsman task**. Depending on the error, - the VM could be recovered by re-running [`upgrade-opsman`][upgrade-opsman]. - This may or may not require a change to the [state file][state], - depending on if there is an [ensure][concourse-ensure] - set for the state file resource. - -1. **Manual recovery**. The VM can always be recovered manually - by deploying the Ops Manager OVA, raw, or yml from Tanzu Network. - -Below is a list of common errors when running `upgrade-opsman`. - -- **Error: The Ops Manager API is inaccessible.** - Rerun the [`upgrade-opsman`][upgrade-opsman] task. The task will assume that the Ops Manager VM is not - created, and will run the [`create-vm`][create-vm] and - [`import-installation`][import-installation] tasks. 
- -- **Error: The CLI for a supported IAAS fails.** (i.e., bad network, outage, etc) - The specific error will be returned as output, - but most errors can be fixed - by re-running the [`upgrade-opsman`][upgrade-opsman] task. - -## Restoring the Original Ops Manager VM -There may be an instance in which you want to restore a previous Ops Manager VM -before completing the upgrade process. - -It is recommended to restore a previous Ops Manager VM manually. -The [Running Commands Locally How-to Guide][running-commands-locally] -is a helpful resource to get started with the manual process below. - -1. Run `delete-vm` on the failed or non-desired Ops Manager - using the [`state.yml`][state] if applicable. - [`opsman.yml`][opsman-config] is required for this command. - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator delete-vm --state-file state.yml --config opsman.yml - ``` - -1. Run `create-vm` using either an empty [`state.yml`][state] - or the state output by the previous step. - This command requires the image file from Tanzu Network - of the original version that was deployed (yml, ova, raw). - [`opsman.yml`][opsman-config] is required for this command. - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator create-vm --config opsman.yml --image-file original-opsman-image.yml \ - --state state.yml - ``` - -1. Run `import-installation`. - This command requires the exported installation of the original Ops Manager - and the `env.yml` used by Platform Automation Toolkit - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - om --env env.yml import-installation --installation installation.zip - ``` - -Alternatively, these steps could be completed using the `upgrade-opsman` command. -This command requires all inputs described above. 
-```bash -docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ -p-automator upgrade-opsman --state-file state.yml \ ---config opsman.yml --image-file original-opsman-image.yml \ ---installation installation.zip --env-file env.yml -``` - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/concepts/upgrade.md.html.erb b/docs/concepts/upgrade.md.html.erb new file mode 100644 index 00000000..58471e86 --- /dev/null +++ b/docs/concepts/upgrade.md.html.erb @@ -0,0 +1,111 @@ +# Recovering and upgrading Tanzu Operations Manager + +This topic provides an overview +of upgrading and recovering a VMware Tanzu Operations Manager using Platform Automation Toolkit, +including common errors. + +<%= partial "./export_installation_note" %> + +## Upgrading Tanzu Operations Manager + +When upgrading your Tanzu Operations Manager: + +* always perform an export installation +* persist that exported installation +* separate installation from upgrade +* ensure that an initial installation is done, which maintains the state + +### Upgrade flowchart + +The [`upgrade-opsman`](../tasks.html#upgrade-opsman) task follows the flow based on the state of a Tanzu Operations Manager VM. +This flowchart gives a high level overview of how the task makes decisions for an upgrade. + +[//]: # (Include upgrade-flowchart.mmd flowchart) + +![The upgrade process checks the Ops Manager version and creates a new Ops Manager VM before importing the installation.](../img/upgrade-flowchart.png) + +On successive invocations of the task, it will offer different behavior compared to the previous run. +This aids in recovering from failures (that is, from an IAAS) that occur. + +## Recovering the Tanzu Operations Manager VM + +Using the `upgrade-opsman` task will always delete the VM. +This is done to create a consistent and simplified experience across IAASs. 
+For example, some IAASs have IP conflicts
+if there are multiple Tanzu Operations Manager VMs present.
+
+If there is a problem during the upgrade,
+you might need to recover your Tanzu Operations Manager VM.
+Recovering your VM can be done in two different ways.
+Both methods require an exported installation.
+
+* **Recovery using the upgrade-opsman task**. Depending on the error,
+the VM could be recovered by re-running [`upgrade-opsman`](../tasks.html#upgrade-opsman).
+This may or may not require a change to the [state file](../inputs-outputs.html#state),
+depending on if there is an [ensure](https://concourse-ci.org/jobs.html#schema.step.ensure)
+set for the state file resource.
+
+* **Manual recovery**. The VM can always be recovered manually
+by deploying the Tanzu Operations Manager OVA, raw, or YAML from Tanzu Network.
+
+This is a list of common errors when running `upgrade-opsman`.
+
+- **Error: The Tanzu Operations Manager API is inaccessible.**
+  Rerun the [`upgrade-opsman`](../tasks.html#upgrade-opsman) task.
+  The task assumes that the Tanzu Operations Manager VM is not
+  created, and runs the [`create-vm`](../tasks.html#create-vm) and
+  [`import-installation`](../tasks.html#import-installation) tasks.
+
+- **Error: The CLI for a supported IAAS fails.** (for example, bad network, outage, and so on)
+  The specific error is returned as output.
+  Most errors can be fixed
+  by re-running the [`upgrade-opsman`](../tasks.html#upgrade-opsman) task.
+
+## Restoring the original Tanzu Operations Manager VM
+
+There may be an instance in which you want to restore a previous Tanzu Operations Manager VM
+before completing the upgrade.
+
+VMware recommends that you restore a previous Tanzu Operations Manager VM manually.
+See [Running commands locally](../how-to-guides/running-commands-locally.html)
+for information about the following manual process.
+
+1. 
Run `delete-vm` on the failed or non-desired Tanzu Operations Manager + using the [`state.yml`](../inputs-outputs.html#state) if applicable. + [`opsman.yml`](../inputs-outputs.html#tanzu-operations-manager-config) is required for this command. + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + p-automator delete-vm --state-file state.yml --config opsman.yml + ``` + +2. Run `create-vm` using either an empty [`state.yml`](../inputs-outputs.html#state) + or the state that was output by the previous step. + This command requires the image file from the originally deployed version from Tanzu Network + (yaml, ova, raw). + [`opsman.yml`](../inputs-outputs.html#tanzu-operations-manager-config) is required for this command. + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + p-automator create-vm --config opsman.yml --image-file original-opsman-image.yml \ + --state state.yml + ``` + +3. Run `import-installation`. + This command requires the exported installation of the original Tanzu Operations Manager + and the `env.yml` used by Platform Automation Toolkit. + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + om --env env.yml import-installation --installation installation.zip + ``` + +Alternatively, you can complete these steps using the `upgrade-opsman` command. +This command requires all of the inputs described above. + +```bash +docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ +p-automator upgrade-opsman --state-file state.yml \ +--config opsman.yml --image-file original-opsman-image.yml \ +--installation installation.zip --env-file env.yml +``` diff --git a/docs/concepts/variables.md b/docs/concepts/variables.md deleted file mode 100644 index 8d0831d9..00000000 --- a/docs/concepts/variables.md +++ /dev/null @@ -1,109 +0,0 @@ -# Variables - -## What are Platform Automation Toolkit variables? 
-Variables provide a way to define parameters for a YAML document. Each variable has a value -and can be referenced in one or more locations. Variables are used in the Platform Automation Toolkit -[tasks][task-reference]. One example usage is in [configure director][configure-director]. - -## Why use variables? -It's typically necessary to separate passwords, certificates, S3 bucket names etc. from YAML -documents for security or multi-foundation purposes. Even though the structure -of a YAML document (manifest) does not change, these values are typically different. Variables -require special syntax in the configuration files which need them. The resulting config file is then a -parametrized template for use. - -## Using variables -In the Platform Automation Toolkit task, you can choose to parametrize the specific entries in the configuration -file, by using the `((parametrized-value))` syntax, and then defining the `parametrized-value` in a -separate variable file. -For example, to add two variables to a YAML document (base.yml): - -```yaml -s3_bucket_name: ((foundation_one_bucket)) -domain_name: ((foundation_one_domain_name)) -``` - -In your vars.yml file, define the parametrized values (vars.yml): - -```yaml -foundation_one_bucket: aws-bucket-one -foundation_one_domain_name: foundation.one.domain.com -``` - -To check the base.yml has the variables defined in vars.yml, you can run: -`om interpolate --config base.yml --vars-file vars.yml` -If everything works as expected, you should see the following output: - -```yaml -s3_bucket_name: aws-bucket-one -domain_name: foundation.one.domain.com -``` - -Otherwise you will receive an error message indicating missing variables: -``` -could not execute "interpolate": Expected to find variables: ((missing-value)) -``` - -!!! 
info - If you are using an additional secrets manager, such as credhub, you can add the flag - `--skip-missing` to your `om interpolate` call to allow parametrized variables to - still be present in your config after interpolation, to be later filled in by - interpolating with your secrets manager. See the [Secrets Handling][secrets-handling] page for a more - detailed explanation. - -## Why use variables if you're already using a secrets manager? -[Secrets Handling][secrets-handling] is a secure way to store sensitive information about your foundation, such as -access keys, passwords, ssh keys, etc. The following flowchart gives an example workflow on how you might use -a mix of a secrets manager and vars files across multiple foundations with a single shared `base_vars_template`, -that can be used to generate the `interpolated_vars` unique to a particular foundation, and passed into the relevant -tasks. A separate `var_template.yml` could be used for every foundation to give unique credentials to those -foundations. More common shared settings could be included in the `vars_file.yml`. - -{% include "./variables-interpolate-flowchart-independent.mmd" %} - -Alternatively, you can keep all of your vars in the same file for a foundation and mix parametrized and -unparametrized values. The interpolated vars file can be used directly in any task that allows for them. -The trade-off for this method is the mixed vars file would be tied to a single foundation, rather than -have a single `base_vars_template.yml` shared across foundations. - -{% include "./variables-interpolate-flowchart-mixed.mmd" %} - - -## Using variables in the Platform Automation Toolkit Tasks - -Some Platform Automation Toolkit tasks have an optional vars input. -Using the flow described above, these files can be plugged in to the tasks. - -We provide a [Test Task](../tasks.md#test-interpolate) -to allow pipeline testing before installing Ops Manager. 
-An example pipeline for this is below: - -```yaml -jobs: -- name: test-interpolate - plan: - - get: - - get: - - get: platform-automation-image - params: - unpack: true - - get: platform-automation-tasks - params: - unpack: true - - task: interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/test-interpolate.yml - input_mapping: - config: - vars: - params: - VARS_FILES: vars/vars.yml # vars/vars2.yml - CONFIG_FILE: base.yml - SKIP_MISSING: true # false to enable strict interpolation - -``` - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/concepts/variables.md.html.erb b/docs/concepts/variables.md.html.erb new file mode 100644 index 00000000..c27429a2 --- /dev/null +++ b/docs/concepts/variables.md.html.erb @@ -0,0 +1,115 @@ +# Platform Automation Toolkit variables + + +Variables provide a way to define parameters for a YAML document. Each variable has a value +and can be referenced in one or more locations. Variables are used in the Platform Automation Toolkit +[tasks](../tasks.html). One example usage is in [configure-director](../tasks.html#configure-director). + +## Why use variables? + +It's typically necessary to separate passwords, certificates, S3 bucket names, and so on, from YAML +documents for security or multi-foundation purposes. Even though the structure +of a YAML document (manifest) does not change, these values are typically different. Variables +require special syntax in the configuration files that need them. The resulting config file is then a +parametrized template for use in multiple situations. + +## Using variables + +In the Platform Automation Toolkit task, you can choose to parametrize the specific entries in the configuration +file by using the `((parametrized-value))` syntax, and then defining the `parametrized-value` in a +separate variable file. 
+For example, to add two variables to a YAML document (base.yml): + +```yaml +s3_bucket_name: ((foundation_one_bucket)) +domain_name: ((foundation_one_domain_name)) +``` + +In your `vars.yml` file, define the parametrized values (vars.yml): + +```yaml +foundation_one_bucket: aws-bucket-one +foundation_one_domain_name: foundation.one.domain.com +``` + +To check that the base.yml has the variables defined in `vars.yml`, you can run: + +``` +om interpolate --config base.yml --vars-file vars.yml +``` + +If everything works as expected, you should see the following output: + +```yaml +s3_bucket_name: aws-bucket-one +domain_name: foundation.one.domain.com +``` + +Otherwise you will receive an error message indicating that there are missing variables: + +``` +could not execute "interpolate": Expected to find variables: ((missing-value)) +``` + +

+If you are using an additional secrets manager, such as CredHub, you can add the flag +--skip-missing to your om interpolate call to allow parametrized variables to +still be present in your config after interpolation, to be filled in later by +interpolating with your secrets manager. See Using a secrets store to store credentials +for a more detailed explanation.

+ +## Why use variables if you're already using a secrets manager? + +[Using a secrets store to store credentials](./secrets-handling.html) is a secure way to store sensitive information about your foundation, such as +access keys, passwords, SSH keys, and so on. The following flowchart shows an example workflow on how you might use +a mix of a secrets manager and vars files across multiple foundations with a single shared `base_vars_template`, +that can be used to generate the `interpolated_vars` unique to a particular foundation, and passed into the relevant +tasks. A separate `var_template.yml` could be used for every foundation to give unique credentials to those +foundations. More common shared settings could be included in the `vars_file.yml`. + +[//]: # (Include variables-interpolate-flowchart-independent.mmd flowchart) + +![Credentials stored in CredHub go through stages to get to interpolated_vars.yml, including using variables from an addition vars file.](../img/variables-interpolate-flowchart-independent.png) + +Alternatively, you can keep all of your vars in the same file for a foundation and mix parametrized and +unparametrized values. The interpolated vars file can be used directly in any task that allows for them. +The trade-off for this method is that the mixed vars file is then tied to a single foundation, rather than +having a single `base_vars_template.yml` shared across foundations. + +[//]: # (Include variables-interpolate-flowchart-mixed.mmd flowchart) + +![Credentials stored in CredHub go through var_template.yml to get to interpolated_vars.yml.](../img/variables-interpolate-flowchart-mixed.png) + +## Using variables in the Platform Automation Toolkit Tasks + +Some Platform Automation Toolkit tasks have an optional vars input. +Using the flow described earlier, these files can be plugged in to the tasks. 
+ +Platform Automation Toolkit provides a [Test Task](../tasks.html#test-interpolate) +to allow pipeline testing before installing Tanzu Operations Manager. +An example pipeline for this is shown here: + +```yaml +jobs: +- name: test-interpolate + plan: + - get: + - get: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - task: interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/test-interpolate.yml + input_mapping: + config: + vars: + params: + VARS_FILES: vars/vars.yml # vars/vars2.yml + CONFIG_FILE: base.yml + SKIP_MISSING: true # false to enable strict interpolation + +``` diff --git a/docs/examples/_auth-ldap.html.md.erb b/docs/examples/_auth-ldap.html.md.erb new file mode 100644 index 00000000..2eeeff5f --- /dev/null +++ b/docs/examples/_auth-ldap.html.md.erb @@ -0,0 +1,28 @@ +
+
+    decryption-passphrase: some-passphrase
+    server-url: ldap://example.com
+    ldap-username: cn=admin,dc=opsmanager,dc=com
+    ldap-password: some-password
+    user-search-base: ou=users,dc=opsmanager,dc=com
+    user-search-filter: cn={0}
+    group-search-base: ou=groups,dc=opsmanager,dc=com
+    group-search-filter: member={0}
+    ldap-rbac-admin-group-name: cn=opsmgradmins,ou=groups,dc=opsmanager,dc=com
+    email-attribute: mail
+    ldap-referrals: follow
+
+    # Optional
+    # http-proxy-url:                 # proxy for outbound HTTP network traffic
+    # https-proxy-url:                # proxy for outbound HTTPS network traffic
+    # no-proxy:                       # comma-separated list of hosts that do not go
+                                      # through the proxy
+    # precreated-client-secret:       # create a UAA client on the Ops Manager VM.
+                                      # this will be client-secret in env.yml
+                                      # client ID is precreated-client
+    # server-ssl-cert:                # the server certificate when using ldaps://
+    # skip-create-bosh-admin-client:  # do not create a UAA client on the BOSH
+                                      # director. The client is required to execute
+                                      # BOSH commands from the BOSH CLI
+
+
diff --git a/docs/examples/_auth-saml.html.md.erb b/docs/examples/_auth-saml.html.md.erb new file mode 100644 index 00000000..512bf481 --- /dev/null +++ b/docs/examples/_auth-saml.html.md.erb @@ -0,0 +1,23 @@ +
+
+    ---
+    decryption-passphrase: decryption-passphrase
+    saml-idp-metadata: https://saml.example.com:8080
+    saml-bosh-idp-metadata: https://bosh-saml.example.com:8080
+    saml-rbac-admin-group: opsman.full_control
+    saml-rbac-groups-attribute: myenterprise
+
+    # Optional
+    # http-proxy-url:                 # proxy for outbound HTTP network traffic
+    # https-proxy-url:                # proxy for outbound HTTPS network traffic
+    # no-proxy:                       # comma-separated list of hosts that do not go
+                                      # through the proxy
+    # precreated-client-secret:       # create a UAA client on the Ops Manager VM.
+                                      # this will be client-secret in env.yml
+                                      # client ID is precreated-client
+    # server-ssl-cert:                # the server certificate when using ldaps://
+    # skip-create-bosh-admin-client:  # do not create a UAA client on the BOSH
+                                      # director. The client is required to execute
+                                      # BOSH commands from the BOSH CLI
+
+
diff --git a/docs/examples/_auth.html.md.erb b/docs/examples/_auth.html.md.erb new file mode 100644 index 00000000..080274e7 --- /dev/null +++ b/docs/examples/_auth.html.md.erb @@ -0,0 +1,17 @@ +
+
+    ---
+    username: username
+    password: password
+    decryption-passphrase: decryption-passphrase
+
+    # Optional
+    # http-proxy-url:           # proxy for outbound HTTP network traffic
+    # https-proxy-url:          # proxy for outbound HTTPS network traffic
+    # no-proxy:                 # comma-separated list of hosts that do not go
+                                # through the proxy
+    # precreated-client-secret: # create a UAA client on the Ops Manager VM.
+                                # this will be client-secret in env.yml
+                                # client ID is precreated-client
+
+
diff --git a/docs/examples/_director.html.md.erb b/docs/examples/_director.html.md.erb new file mode 100644 index 00000000..1220b4f2 --- /dev/null +++ b/docs/examples/_director.html.md.erb @@ -0,0 +1,47 @@ +
+
+    ---
+    az-configuration:
+    - clusters:
+      - cluster: cluster-name
+        resource_pool: resource-pool-name
+      name: AZ01
+
+    properties-configuration:
+      iaas_configuration:
+        vcenter_host: vcenter.example.com
+        vcenter_username: admin
+        vcenter_password: password
+        ......
+      director_configuration:
+        blobstore_type: local
+        bosh_recreate_on_next_deploy: false
+        custom_ssh_banner: null
+        ......
+      security_configuration:
+        generate_vm_passwords: true
+        trusted_certificates:
+      syslog_configuration:
+        enabled: false
+
+    network-assignment:
+      network:
+        name: INFRASTRUCTURE
+      other_availability_zones: []
+      singleton_availability_zone:
+        name: AZ01
+
+    networks-configuration:
+      icmp_checks_enabled: false
+      networks:
+      - name: NETWORK-NAME
+      ......
+
+    resource-configuration:
+      compilation:
+        instance_type:
+          id: automatic
+        instances: automatic
+      ......
+
+
\ No newline at end of file diff --git a/docs/examples/download-product.yml b/docs/examples/_download-product.html.md.erb similarity index 98% rename from docs/examples/download-product.yml rename to docs/examples/_download-product.html.md.erb index 419af5d7..ed87ccca 100644 --- a/docs/examples/download-product.yml +++ b/docs/examples/_download-product.html.md.erb @@ -28,7 +28,7 @@ pivnet-product-slug: product-slug blobstore-bucket: bucket-name s3-region-name: us-west-1 # if NOT using AWS s3, value is 'region' -## Required unless `s3-auth-type: iam` +## Required unless "s3-auth-type: iam" s3-access-key-id: aws-or-minio-key-id s3-secret-access-key: aws-or-minio-secret-key diff --git a/docs/examples/download-stemcell-product.yml b/docs/examples/_download-stemcell-product.html.md.erb similarity index 83% rename from docs/examples/download-stemcell-product.yml rename to docs/examples/_download-stemcell-product.html.md.erb index 2815ca4e..da1f71f2 100644 --- a/docs/examples/download-stemcell-product.yml +++ b/docs/examples/_download-stemcell-product.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet download-stemcell-product-config start yaml +
+
 ---
 pivnet-api-token: token
 pivnet-file-glob: "*vsphere*"       # must be quoted if starting with a *
@@ -14,4 +15,5 @@ product-version: "250.82"
                             # version prepended. Set if the product will
                             # ever be stored in a blobstore
 
-# code_snippet download-stemcell-product-config end
+
+
diff --git a/docs/examples/_env-uaa.html.md.erb b/docs/examples/_env-uaa.html.md.erb new file mode 100644 index 00000000..5802fdf0 --- /dev/null +++ b/docs/examples/_env-uaa.html.md.erb @@ -0,0 +1,19 @@ +
+
+    ---
+    target: https://pcf.example.com
+    connect-timeout: 30          # default 5
+    request-timeout: 1800        # default 1800
+    skip-ssl-validation: false   # default false
+    client-id: client_id
+    client-secret: client_secret
+    # decryption-passphrase is optional,
+    # except for use with `import-installation`.
+    # OpsMan depends on the passphrase
+    # to decrypt the imported installation.
+    # For other commands, providing this key allows
+    # decryption of the OpsMan VM after reboot,
+    # which would otherwise need to be done manually.
+    decryption-passphrase: passphrase
+
+
diff --git a/docs/examples/_env.html.md.erb b/docs/examples/_env.html.md.erb new file mode 100644 index 00000000..8aab9ede --- /dev/null +++ b/docs/examples/_env.html.md.erb @@ -0,0 +1,19 @@ +
+
+    ---
+    target: https://pcf.example.com
+    connect-timeout: 30            # default 5
+    request-timeout: 1800          # default 1800
+    skip-ssl-validation: false     # default false
+    username: username
+    password: password
+    # decryption-passphrase is optional,
+    # except for use with `import-installation`.
+    # OpsMan depends on the passphrase
+    # to decrypt the imported installation.
+    # For other commands, providing this key allows
+    # decryption of the OpsMan VM after reboot,
+    # which would otherwise need to be done manually.
+    decryption-passphrase: passphrase
+
+
diff --git a/docs/examples/product.yml b/docs/examples/_product.html.md.erb similarity index 93% rename from docs/examples/product.yml rename to docs/examples/_product.html.md.erb index 9d88b365..4974dc60 100644 --- a/docs/examples/product.yml +++ b/docs/examples/_product.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet product-configuration start yaml +
+
 ---
 product-properties:
   .healthwatch-forwarder.bosh_taskcheck_username:
@@ -60,4 +61,5 @@ resource-config:
       size_mb: automatic
     instance_type:
       id: automatic
-# code_snippet product-configuration end yaml
+
+
diff --git a/docs/examples/state.yml b/docs/examples/_state.html.md.erb similarity index 100% rename from docs/examples/state.yml rename to docs/examples/_state.html.md.erb diff --git a/docs/examples/telemetry.yml b/docs/examples/_telemetry.html.md.erb similarity index 71% rename from docs/examples/telemetry.yml rename to docs/examples/_telemetry.html.md.erb index ee384687..2a937398 100644 --- a/docs/examples/telemetry.yml +++ b/docs/examples/_telemetry.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet telemetry start yaml +
+
 ---
 env-type: sandbox     # sandbox|development|qa|pre-production|production
 
@@ -10,6 +11,7 @@ usage-service-client-secret:
 usage-service-insecure-skip-tls-verify:
 
 # CredHub (Optional)
-# with-credhub-info:  # include Credhub certificate expiry information
+# with-credhub-info:  # include CredHub certificate expiry information
 
-# code_snippet telemetry end
+
+
diff --git a/docs/examples/anchors/_credhub-interpolate.html.md.erb b/docs/examples/anchors/_credhub-interpolate.html.md.erb new file mode 100644 index 00000000..c2d56d1c --- /dev/null +++ b/docs/examples/anchors/_credhub-interpolate.html.md.erb @@ -0,0 +1,22 @@ +
+
+    resource-types:
+    resources:
+
+    credhub-interpolate: &credhub-interpolate
+      image: platform-automation-image
+      file: platform-automation-tasks/tasks/credhub-interpolate.yml
+      params:
+        CREDHUB_CLIENT: ((credhub-client))
+        CREDHUB_SECRET: ((credhub-secret))
+        CREDHUB_SERVER: ((credhub-server))
+        PREFIX: '/pipeline/vsphere'
+        INTERPOLATION_PATHS: "download-product-configs"
+      input_mapping:
+        files: config
+      output_mapping:
+        interpolated-files: config
+
+    jobs:
+
+
\ No newline at end of file diff --git a/docs/examples/anchors/_subbing-credhub-interpolate.html.md.erb b/docs/examples/anchors/_subbing-credhub-interpolate.html.md.erb new file mode 100644 index 00000000..b1f7c190 --- /dev/null +++ b/docs/examples/anchors/_subbing-credhub-interpolate.html.md.erb @@ -0,0 +1,6 @@ +
+
+    - task: credhub-interpolate
+      <<: *credhub-interpolate
+  
+  
\ No newline at end of file diff --git a/docs/examples/anchors/credhub-interpolate.yml b/docs/examples/anchors/credhub-interpolate.yml deleted file mode 100644 index 8cfd0bd5..00000000 --- a/docs/examples/anchors/credhub-interpolate.yml +++ /dev/null @@ -1,18 +0,0 @@ -resource-types: -resources: - -credhub-interpolate: &credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - params: - CREDHUB_CLIENT: ((credhub-client)) - CREDHUB_SECRET: ((credhub-secret)) - CREDHUB_SERVER: ((credhub-server)) - PREFIX: '/pipeline/vsphere' - INTERPOLATION_PATHS: "download-product-configs" - input_mapping: - files: config - output_mapping: - interpolated-files: config - -jobs: \ No newline at end of file diff --git a/docs/examples/anchors/subbing-credhub-interpolate.yml b/docs/examples/anchors/subbing-credhub-interpolate.yml deleted file mode 100644 index 0f578854..00000000 --- a/docs/examples/anchors/subbing-credhub-interpolate.yml +++ /dev/null @@ -1,2 +0,0 @@ -- task: credhub-interpolate - <<: *credhub-interpolate \ No newline at end of file diff --git a/docs/examples/auth-ldap.yml b/docs/examples/auth-ldap.yml deleted file mode 100644 index 05ec3bae..00000000 --- a/docs/examples/auth-ldap.yml +++ /dev/null @@ -1,27 +0,0 @@ -# code_snippet ldap-auth-configuration start yaml -decryption-passphrase: some-passphrase -server-url: ldap://example.com -ldap-username: cn=admin,dc=opsmanager,dc=com -ldap-password: some-password -user-search-base: ou=users,dc=opsmanager,dc=com -user-search-filter: cn={0} -group-search-base: ou=groups,dc=opsmanager,dc=com -group-search-filter: member={0} -ldap-rbac-admin-group-name: cn=opsmgradmins,ou=groups,dc=opsmanager,dc=com -email-attribute: mail -ldap-referrals: follow - -# Optional -# http-proxy-url: # proxy for outbound HTTP network traffic -# https-proxy-url: # proxy for outbound HTTPS network traffic -# no-proxy: # comma-separated list of hosts that do not go - # through the proxy -# 
precreated-client-secret: # create a UAA client on the Ops Manager VM. - # this will be client-secret in env.yml - # client ID is precreated-client -# server-ssl-cert: # the server certificate when using ldaps:// -# skip-create-bosh-admin-client: # do not create a UAA client on the BOSH - # director. The client is required to execute - # BOSH commands from the BOSH CLI - -# code_snippet ldap-auth-configuration end diff --git a/docs/examples/auth-saml.yml b/docs/examples/auth-saml.yml deleted file mode 100644 index bc85e800..00000000 --- a/docs/examples/auth-saml.yml +++ /dev/null @@ -1,22 +0,0 @@ -# code_snippet saml-auth-configuration start yaml ---- -decryption-passphrase: decryption-passphrase -saml-idp-metadata: https://saml.example.com:8080 -saml-bosh-idp-metadata: https://bosh-saml.example.com:8080 -saml-rbac-admin-group: opsman.full_control -saml-rbac-groups-attribute: myenterprise - -# Optional -# http-proxy-url: # proxy for outbound HTTP network traffic -# https-proxy-url: # proxy for outbound HTTPS network traffic -# no-proxy: # comma-separated list of hosts that do not go - # through the proxy -# precreated-client-secret: # create a UAA client on the Ops Manager VM. - # this will be client-secret in env.yml - # client ID is precreated-client -# server-ssl-cert: # the server certificate when using ldaps:// -# skip-create-bosh-admin-client: # do not create a UAA client on the BOSH - # director. 
The client is required to execute - # BOSH commands from the BOSH CLI - -# code_snippet saml-auth-configuration end diff --git a/docs/examples/auth.yml b/docs/examples/auth.yml deleted file mode 100644 index e5afec65..00000000 --- a/docs/examples/auth.yml +++ /dev/null @@ -1,15 +0,0 @@ -# code_snippet auth-configuration start yaml ---- -username: username -password: password -decryption-passphrase: decryption-passphrase - -# Optional -# http-proxy-url: # proxy for outbound HTTP network traffic -# https-proxy-url: # proxy for outbound HTTPS network traffic -# no-proxy: # comma-separated list of hosts that do not go - # through the proxy -# precreated-client-secret: # create a UAA client on the Ops Manager VM. - # this will be client-secret in env.yml - # client ID is precreated-client -# code_snippet auth-configuration end diff --git a/docs/examples/director.yml b/docs/examples/director.yml deleted file mode 100644 index 7e68b721..00000000 --- a/docs/examples/director.yml +++ /dev/null @@ -1,45 +0,0 @@ -# code_snippet director-configuration start yaml ---- -az-configuration: -- clusters: - - cluster: cluster-name - resource_pool: resource-pool-name - name: AZ01 - -properties-configuration: - iaas_configuration: - vcenter_host: vcenter.example.com - vcenter_username: admin - vcenter_password: password - ...... - director_configuration: - blobstore_type: local - bosh_recreate_on_next_deploy: false - custom_ssh_banner: null - ...... - security_configuration: - generate_vm_passwords: true - trusted_certificates: - syslog_configuration: - enabled: false - -network-assignment: - network: - name: INFRASTRUCTURE - other_availability_zones: [] - singleton_availability_zone: - name: AZ01 - -networks-configuration: - icmp_checks_enabled: false - networks: - - name: NETWORK-NAME - ...... - -resource-configuration: - compilation: - instance_type: - id: automatic - instances: automatic - ...... 
-# code_snippet director-configuration end \ No newline at end of file diff --git a/docs/examples/env-uaa.yml b/docs/examples/env-uaa.yml deleted file mode 100644 index 9c7bcabb..00000000 --- a/docs/examples/env-uaa.yml +++ /dev/null @@ -1,17 +0,0 @@ -# code_snippet env-uaa start yaml ---- -target: https://pcf.example.com -connect-timeout: 30 # default 5 -request-timeout: 1800 # default 1800 -skip-ssl-validation: false # default false -client-id: client_id -client-secret: client_secret -# decryption-passphrase is optional, -# except for use with `import-installation`. -# OpsMan depends on the passphrase -# to decrypt the imported installation. -# For other commands, providing this key allows -# decryption of the OpsMan VM after reboot, -# which would otherwise need to be done manually. -decryption-passphrase: passphrase -# code_snippet env-uaa end diff --git a/docs/examples/env.yml b/docs/examples/env.yml deleted file mode 100644 index f32dedb2..00000000 --- a/docs/examples/env.yml +++ /dev/null @@ -1,17 +0,0 @@ -# code_snippet env start yaml ---- -target: https://pcf.example.com -connect-timeout: 30 # default 5 -request-timeout: 1800 # default 1800 -skip-ssl-validation: false # default false -username: username -password: password -# decryption-passphrase is optional, -# except for use with `import-installation`. -# OpsMan depends on the passphrase -# to decrypt the imported installation. -# For other commands, providing this key allows -# decryption of the OpsMan VM after reboot, -# which would otherwise need to be done manually. 
-decryption-passphrase: passphrase -# code_snippet env end diff --git a/docs/examples/opsman-config/aws.yml b/docs/examples/opsman-config/_aws1.html.md.erb similarity index 92% rename from docs/examples/opsman-config/aws.yml rename to docs/examples/opsman-config/_aws1.html.md.erb index 4b9d7be8..e0d46565 100644 --- a/docs/examples/opsman-config/aws.yml +++ b/docs/examples/opsman-config/_aws1.html.md.erb @@ -1,11 +1,12 @@ -# code_snippet aws-configuration start yaml +
+
 ---
 opsman-configuration:
   aws:
     region: us-west-2
     vpc_subnet_id: subnet-0292bc845215c2cbf
     security_group_ids: [ sg-0354f804ba7c4bc41 ]
-    key_pair_name: ops-manager-key  # used to ssh to VM
+    key_pair_name: ops-manager-key  # used to SSH to VM
     iam_instance_profile_name: env_ops_manager
 
     # At least one IP address (public or private) needs to be assigned to the
@@ -38,4 +39,5 @@ opsman-configuration:
   # banner-settings: ...
   # syslog-settings: ...
   # rbac-settings: ...
-# code_snippet aws-configuration end
+
+
diff --git a/docs/examples/opsman-config/azure.yml b/docs/examples/opsman-config/_azure1.html.md.erb similarity index 97% rename from docs/examples/opsman-config/azure.yml rename to docs/examples/opsman-config/_azure1.html.md.erb index 994b186a..ca327d9e 100644 --- a/docs/examples/opsman-config/azure.yml +++ b/docs/examples/opsman-config/_azure1.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet azure-configuration start yaml +
+
 ---
 opsman-configuration:
   azure:
@@ -52,4 +53,5 @@ opsman-configuration:
   # banner-settings: ...
   # syslog-settings: ...
   # rbac-settings: ...
-# code_snippet azure-configuration end
+
+
diff --git a/docs/examples/opsman-config/gcp.yml b/docs/examples/opsman-config/_gcp1.html.md.erb similarity index 93% rename from docs/examples/opsman-config/gcp.yml rename to docs/examples/opsman-config/_gcp1.html.md.erb index fb9e5747..c80c6139 100644 --- a/docs/examples/opsman-config/gcp.yml +++ b/docs/examples/opsman-config/_gcp1.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet gcp-configuration start yaml +
+
 ---
 opsman-configuration:
   gcp:
@@ -34,4 +35,5 @@ opsman-configuration:
   # banner-settings: ...
   # syslog-settings: ...
   # rbac-settings: ...
-# code_snippet gcp-configuration end
+
+
diff --git a/docs/examples/opsman-config/openstack.yml b/docs/examples/opsman-config/_openstack1.html.md.erb similarity index 91% rename from docs/examples/opsman-config/openstack.yml rename to docs/examples/opsman-config/_openstack1.html.md.erb index 78891044..bcb50472 100644 --- a/docs/examples/opsman-config/openstack.yml +++ b/docs/examples/opsman-config/_openstack1.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet openstack-configuration start yaml +
+
 ---
 opsman-configuration:
   openstack:
@@ -29,4 +30,5 @@ opsman-configuration:
   # banner-settings: ...
   # syslog-settings: ...
   # rbac-settings: ...
-# code_snippet openstack-configuration end
\ No newline at end of file
+
+
\ No newline at end of file diff --git a/docs/examples/opsman-config/settings.yml b/docs/examples/opsman-config/_settings.html.md.erb similarity index 94% rename from docs/examples/opsman-config/settings.yml rename to docs/examples/opsman-config/_settings.html.md.erb index acf8ea4f..aa492b10 100644 --- a/docs/examples/opsman-config/settings.yml +++ b/docs/examples/opsman-config/_settings.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet opsman-settings start yaml +
+
 # These are OPTIONAL settings that can exist in your opsman.yml
 # When upgrading an Ops Manager, these are configurations
 # that can be updated on the Settings page in the Ops Manager UI.
@@ -40,4 +41,5 @@ rbac-settings: # if your RBAC is SAML, use these settings
 opsman-configuration:
   aws: # azure, gcp, openstack, vsphere
     ...
-# code_snippet opsman-settings end yaml
\ No newline at end of file
+
+
diff --git a/docs/examples/opsman-config/vsphere.yml b/docs/examples/opsman-config/_vsphere1.html.md.erb similarity index 94% rename from docs/examples/opsman-config/vsphere.yml rename to docs/examples/opsman-config/_vsphere1.html.md.erb index be54d3be..3311b3ee 100644 --- a/docs/examples/opsman-config/vsphere.yml +++ b/docs/examples/opsman-config/_vsphere1.html.md.erb @@ -1,4 +1,5 @@ -# code_snippet vsphere-configuration start yaml +
+
 ---
 opsman-configuration:
   vsphere:
@@ -42,4 +43,5 @@ opsman-configuration:
   # banner-settings: ...
   # syslog-settings: ...
   # rbac-settings: ...
-# code_snippet vsphere-configuration end
+
+
\ No newline at end of file diff --git a/docs/examples/state/_aws.html.md.erb b/docs/examples/state/_aws.html.md.erb new file mode 100644 index 00000000..1173f98d --- /dev/null +++ b/docs/examples/state/_aws.html.md.erb @@ -0,0 +1,7 @@ +
+    
+        iaas: aws
+        # Instance ID of the AWS VM
+        vm_id: i-12345678987654321
+    
+    
diff --git a/docs/examples/state/_azure.html.md.erb b/docs/examples/state/_azure.html.md.erb new file mode 100644 index 00000000..2754805a --- /dev/null +++ b/docs/examples/state/_azure.html.md.erb @@ -0,0 +1,7 @@ +
+    
+        iaas: azure
+        # Computer Name of the Azure VM
+        vm_id: vm_name
+    
+    
diff --git a/docs/examples/state/_gcp.html.md.erb b/docs/examples/state/_gcp.html.md.erb new file mode 100644 index 00000000..212bc9a0 --- /dev/null +++ b/docs/examples/state/_gcp.html.md.erb @@ -0,0 +1,7 @@ +
+    
+        iaas: gcp
+        # Name of the VM in GCP
+        vm_id: vm_name
+    
+    
diff --git a/docs/examples/state/_openstack.html.md.erb b/docs/examples/state/_openstack.html.md.erb new file mode 100644 index 00000000..3a7d6a46 --- /dev/null +++ b/docs/examples/state/_openstack.html.md.erb @@ -0,0 +1,7 @@ +
+    
+        iaas: openstack
+        # Instance ID from the OpenStack Overview
+        vm_id: 12345678-9876-5432-1abc-defghijklmno
+    
+    
diff --git a/docs/examples/state/_vsphere.html.md.erb b/docs/examples/state/_vsphere.html.md.erb new file mode 100644 index 00000000..a43ff02b --- /dev/null +++ b/docs/examples/state/_vsphere.html.md.erb @@ -0,0 +1,7 @@ +
+    
+        iaas: vsphere
+        # Path to the VM in vCenter
+        vm_id: /datacenter/vm/folder/vm_name
+    
+    
diff --git a/docs/examples/state/aws.yml b/docs/examples/state/aws.yml deleted file mode 100644 index 229e2f07..00000000 --- a/docs/examples/state/aws.yml +++ /dev/null @@ -1,3 +0,0 @@ -iaas: aws -# Instance ID of the AWS VM -vm_id: i-12345678987654321 diff --git a/docs/examples/state/azure.yml b/docs/examples/state/azure.yml deleted file mode 100644 index 56508319..00000000 --- a/docs/examples/state/azure.yml +++ /dev/null @@ -1,3 +0,0 @@ -iaas: azure -# Computer Name of the Azure VM -vm_id: vm_name diff --git a/docs/examples/state/gcp.yml b/docs/examples/state/gcp.yml deleted file mode 100644 index cc030a40..00000000 --- a/docs/examples/state/gcp.yml +++ /dev/null @@ -1,3 +0,0 @@ -iaas: gcp -# Name of the VM in GCP -vm_id: vm_name diff --git a/docs/examples/state/openstack.yml b/docs/examples/state/openstack.yml deleted file mode 100644 index e373eb14..00000000 --- a/docs/examples/state/openstack.yml +++ /dev/null @@ -1,3 +0,0 @@ -iaas: openstack -# Instance ID from the OpenStack Overview -vm_id: 12345678-9876-5432-1abc-defghijklmno diff --git a/docs/examples/state/vsphere.yml b/docs/examples/state/vsphere.yml deleted file mode 100644 index a6d445c7..00000000 --- a/docs/examples/state/vsphere.yml +++ /dev/null @@ -1,3 +0,0 @@ -iaas: vsphere -# Path to the VM in vCenter -vm_id: /datacenter/vm/folder/vm_name diff --git a/docs/getting-started.html.md.erb b/docs/getting-started.html.md.erb new file mode 100644 index 00000000..76de87cb --- /dev/null +++ b/docs/getting-started.html.md.erb @@ -0,0 +1,34 @@ +# Getting started + +If you are using Platform Automation Toolkit for the first time, it is useful to review the [Overview](./index.html) of Platform Automation Toolkit before diving into more technical content. 
+ +## How-to guides + +For in-depth procedures for deploying a new Tanzu Operations Manager and +taking over management of an existing Tanzu Operations Manager +with Platform Automation Toolkit, see the following topics: + +- For information about deploying Concourse with CredHub and User Account and Authentication (UAA), + see [Install Concourse for Platform Automation](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/concourse-for-tanzu/7-0/tanzu-concourse/index.html). +- For information about deploying Platform Automation Toolkit + with a new Tanzu Operations Manager, see [Installing Tanzu Operations Manager](./how-to-guides/installing-opsman.html). +- For information about deploying Platform Automation Toolkit + with an existing Tanzu Operations Manager, see + [Upgrading an existing Tanzu Operations Manager](./how-to-guides/upgrade-existing-opsman.html). + +## Reference pipelines + +To see an example of a finished pipeline, +review the following reference pipeline: + +- A [multi-product pipeline](./pipelines/multiple-products.html) + with [Tanzu Platform for Cloud Foundry](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-platform-for-cloud-foundry/10-0/tpcf/concepts-overview.html) and [Healthwatch](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform-services/healthwatch-for-vmware-tanzu/2-3/healthwatch/index.html). + +## Other references + +To see a list of all of the tasks, their usage, +their inputs and outputs, and their parameters, +see the [Task Reference](./tasks.html). + +To learn more about the format of the files used with the tasks, +see the [Task inputs and outputs reference](./inputs-outputs.html). diff --git a/docs/getting-started.md b/docs/getting-started.md deleted file mode 100644 index e757278f..00000000 --- a/docs/getting-started.md +++ /dev/null @@ -1,39 +0,0 @@ -If you are using Platform Automation Toolkit for the first time, -you have a few options. 
- -Our [Overview][overview] of Platform Automation Toolkit describes it conceptually, -and is useful to review prior to diving into more technical content. - -## How-to Guides - -We have in-depth procedures for deploying a new Ops Manager and -taking over management of an existing Ops Manager -with Platform Automation Toolkit: - -- For more information about deploying Concourse with Credhub and UAA, - see [Install Concourse for Platform Automation][concourse-for-pa]. -- For more information about deploying Platform Automation Toolkit - with a new Ops Manager, see [Installing Ops Manager][install-how-to]. -- For more information about deploying Platform Automation Toolkit - with an existing Ops Manager, see - [Upgrading an Existing Ops Manager][upgrade-how-to]. - -## Reference Pipelines - -To see an example of a finished pipeline, -see one of the following reference pipelines: - -- A [multi-product pipeline][reference-pipeline] - with [Tanzu Application Service][tas] and [Healthwatch][healthwatch]. - -## Other References - -To see a list of all of the tasks, their usage, -their inputs and outputs, and their parameters, -see the [Task Reference][task-reference]. - -To learn more about the format of the files used with the tasks, -see the [Task Inputs and Outputs Reference][inputs-outputs]. - -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/.getting-started.md b/docs/how-to-guides/.getting-started.md deleted file mode 100644 index 56dec718..00000000 --- a/docs/how-to-guides/.getting-started.md +++ /dev/null @@ -1,526 +0,0 @@ -## Prerequisites - -Over the course of this guide, -we're going to use Platform Automation Toolkit -to create a [pipeline][concourse-pipeline] -using [Concourse][concourse]. - -Before we get started, you'll need a few things ready to go: - -{% if upgradeHowTo %} -1. A running Ops Manager VM that you would like to upgrade -{% endif %} -1. 
Credentials for an IaaS that Ops Manager is compatible with - - It doesn't actually matter what IaaS you use for Ops Manager, - as long as your Concourse can connect to it. - Pipelines built with Platform Automation Toolkit can be platform-agnostic. -1. A Concourse instance - with access to a Credhub instance - and to the Internet -1. GitHub account -1. Read/write credentials and bucket name for an S3 bucket -1. An account on [VMware Tanzu Network][tanzu-network] -1. A MacOS workstation - - with Docker installed - - a text editor you like - - a terminal emulator you like - - a browser that works with Concourse, - like Firefox or Chrome - - and `git` - -It will be very helpful to have a basic familiarity with the following. If you don't have basic familiarity with all these things, -that's okay. -We'll explain some basics, -and link to resources to learn more: - -- the bash terminal -- [git][git] -- [YAML][yaml] -- [Concourse][concourse] - - -!!! info "A note on the prerequisites" -

While this guide uses Github to provide a git remote, - and an S3 bucket as a blobstore, - Platform Automation Toolkit supports arbitrary git providers - and S3-compatible blobstores. -

If you need to use an alternate one, - that's okay. -

We picked specific examples - so we could describe some steps in detail. - Some details may be different - if you follow along with different providers. - If you're comfortable navigating those differences on your own, - go for it! -

Check out our reference for [using an S3-specific blobstore][setup-s3-and-resources] -

Similarly, in this guide, we assume the MacOS operating system. - This should all work fine on Linux, too, - but there might be differences in the paths you'll need to figure out. - -## Creating a Concourse Pipeline - -Platform Automation Toolkit's tasks and image are meant to be used in a Concourse pipeline. -So, let's make one. - -Using your bash command-line client, -create a directory to keep your pipeline files in, and `cd` into it. - -```bash -mkdir your-repo-name -cd !$ -``` - -This repo name should relate to your situation -and be specific enough to be navigable from your local workstation. - -!!! tip ""`!$`"" - `!$` is a bash shortcut. - Pronounced "bang, dollar-sign," - it means "use the last argument from the most recent command." - In this case, that's the directory we just created! - This is not a Platform Automation Toolkit thing, - this is just a bash tip dearly beloved - of at least one Platform Automator. - -{% if upgradeHowTo %} - -Before we get started with the pipeline itself, -we'll gather some variables in a file -we can use throughout our pipeline. - -Open your text editor and create `vars.yml`. -Here's what it should look like to start, we can add things to this as we go: - -```yaml -platform-automation-bucket: your-bucket-name -credhub-server: https://your-credhub.example.com -opsman-url: https://pcf.foundation.example.com -``` - -!!! info "Using a DNS" - This example assumes that you're using DNS and hostnames. - You can use IP addresses for all these resources instead, - but you still need to provide the information as a URL, - for example: `https://120.121.123.124` - -{% endif %} - -Now, create a file called `pipeline.yml`. - -!!! info "Naming" - We'll use `pipeline.yml` in our examples throughout this guide. - However, you may create multiple pipelines over time. - If there's a more sensible name for the pipeline you're working on, - feel free to use that instead. - -Write this at the top, and save the file. 
This is [YAML][yaml] for "the start of the document". It's optional, but traditional: - -```yaml - ---- -``` - -Now you have a pipeline file! Nominally! -Well, look. -It's valid YAML, at least. - -### Getting `fly` - -Let's try to set it as a pipeline with [`fly`][concourse-fly], -the Concourse command-line Interface (CLI). - -First, check if we've got `fly` installed at all: - -```bash -fly -v -``` - -If it gives you back a version number, great! -Skip ahead to [Setting The Pipeline](#setting-the-pipeline) - -If it says something like `-bash: fly: command not found`, -we have a little work to do: we've got to get `fly`. - -Navigate to the address for your Concourse instance in a web browser. -At this point, you don't even need to be signed in! -If there are no public pipelines, you should see something like this: - -![Get Fly][fly-download-image] - -If there _are_ public pipelines, -or if you're signed in and there are pipelines you can see, -you'll see something similar in the lower-right hand corner. - -Click the icon for your OS and save the file, -`mv` the resulting file to somewhere in your `$PATH`, -and use `chmod` to make it executable: - -!!! info "A note on command-line examples" - Some of these, you can copy-paste directly into your terminal. - Some of them won't work that way, - or even if they did, would require you to edit them to replace our example values - with your actual values. - We recommend you type all of the bash examples in by hand, - substituting values, if necessary, as you go. - Don't forget that you can often hit the `tab` key - to auto-complete the name of files that already exist; - it makes all that typing just a little easier, - and serves as a sort of command-line autocorrect. - -```bash -mv ~/Downloads/fly /usr/local/bin/fly -chmod +x !$ -``` - -Congrats! You got `fly`. - -!!! info "Okay but what did I just do?" - FAIR QUESTION. 
You downloaded the `fly` binary, - moved it into bash's PATH, - which is where bash looks for things to execute - when you type a command, - and then added permissions that allow it to be e`x`ecuted. - Now, the CLI is installed - - and we won't have to do all that again, - because `fly` has the ability to update itself, - which we'll get into later. - -### Setting The Pipeline - -Okay _now_ let's try to set our pipeline with `fly`, the Concourse CLI. - -`fly` keeps a list of Concourses it knows how to talk to. -Let's see if the Concourse we want is already on the list: - -```bash -fly targets -``` - -If you see the address of the Concourse you want to use in the list, -note down its name, and use it in the login command: - -```bash -fly -t control-plane login -``` - -!!! info "Control-plane?" - We're going to use the name `control-plane` - for our Concourse in this guide. - It's not a special name, - it just happens to be the name - of the Concourse we want to use in our target list. - -If you don't see the Concourse you need, you can add it with the `-c` (`--concourse-url`)flag: - -```bash -fly -t control-plane login -c https://your-concourse.example.com -``` - -You should see a login link you can click on -to complete login from your browser. - -!!! tip "Stay on target" -

The `-t` flag sets the name when used with `login` and `-c`. - In the future, you can leave out the `-c` argument. -

If you ever want to know what a short flag stands for, - you can run the command with `-h` (`--help`) at the end. - -Pipeline-setting time! -We'll use the name "foundation" for this pipeline, -but if your foundation has an actual name, use that instead. - -```bash -fly -t control-plane set-pipeline -p foundation -c pipeline.yml -``` - -It should say `no changes to apply`, -which is fair, since we gave it an empty YAML doc. - -!!! info "Version discrepancy" - If `fly` says something about a "version discrepancy," - "significant" or otherwise, just do as it says: - run `fly sync` and try again. - `fly sync` automatically updates the CLI - with the version that matches the Concourse you're targeting. - Useful! - -### Your First Job - -Let's see Concourse actually _do_ something, yeah? - -Add this to your `pipeline.yml`, starting on the line after the `---`: - -```yaml -wait: no nevermind let's get version control first -``` - -Good point. Don't actually add that to your pipeline config yet. -Or if you have, delete it, so your whole pipeline looks like this again: - -```yaml - ---- -``` - -Reverting edits to our pipeline is something we'll probably want to do again. -This is one of many reasons we want to keep our pipeline under version control. - -So let's make this directory a git repo! - -#### But First, `git init` - -!!! tip "Git Repository Layout" -

The following describes a step-by-step approach for how to get set up with git. -

For an example of the repository file structure - for single and multiple foundation systems, - please reference [Git Repository Layout][git-repo-layout]. - -`git` should come back with information about the commit you just created: - -```bash -git init -git commit --allow-empty -m "Empty initial commit" -``` - -If it gives you a config error instead, -you might need to configure `git` a bit. -Here's a [good guide][git-first-time-setup] -to initial setup. -Get that done, and try again. - -Now we can add our `pipeline.yml`, -so in the future it's easy to get back to that soothing `---` state. - -```bash -git add pipeline.yml {% if upgradeHowTo %}vars.yml{% endif %} -git commit -m "Add pipeline{% if upgradeHowTo %} and starter vars{% endif %}" -``` - -Let's just make sure we're all tidy: - -```bash -git status -``` - -`git` should come back with `nothing to commit, working tree clean`. - -Great. Now we can safely make changes. - -!!! tip "Git commits" -

`git` commits are the basic unit of code history. -

Making frequent, small, commits with good commit messages - makes it _much easier_ to figure out why things are the way they are, - and to return to the way things were in simpler, better times. - Writing short commit messages that capture the _intent_ of the change - (in an imperative style) can be tough, - but it really does make the pipeline's history much more legible, - both to future-you, - and to current-and-future teammates and collaborators. - -#### The Test Task - -Platform Automation Toolkit comes with a [`test`][test] task -meant to validate that it's been installed correctly. -Let's use it to get setup. - -Add this to your `pipeline.yml`, starting on the line after the `---`: - -```yaml -jobs: -- name: test - plan: - - task: test - image: platform-automation-image - file: platform-automation-tasks/tasks/test.yml -``` - -If we try to set this now, Concourse will take it: - -```bash -fly -t control-plane set-pipeline -p foundation -c pipeline.yml -``` - -Now we should be able to see our pipeline -in the Concourse UI. -It'll be paused, so click the "play" button to unpause it. -Then, click in to the gray box for our `test` job, -and hit the "plus" button to schedule a build. - -It should error immediately, with `unknown artifact source: platform-automation-tasks`. -We didn't give it a source for our task file. - -We've got a bit of pipeline code that Concourse accepts. 
-Before we start doing the next part, -this would be a good moment to make a commit: - -```bash -git add pipeline.yml -git commit -m "Add (nonfunctional) test task" -``` - -With that done, -we can try to get the inputs we need -by adding `get` steps to the plan -before the task, like so: - -```yaml hl_lines="4-13" -jobs: -- name: test - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - task: test - image: platform-automation-image - file: platform-automation-tasks/tasks/test.yml -``` - -!!! note "When using vSphere" - There is a smaller vSphere container image available. - To use it instead of the general purpose image, - you can use this glob to get the image: - - ```yaml - - get: platform-automation-image - resource: platform-automation - params: - globs: ["vsphere-platform-automation-image*.tar.gz"] - unpack: true - ``` - -If we try to `fly set` this, -`fly` will complain about invalid resources. - -To actually make the `image` and `file` we want to use available, -we'll need some Resources. - -#### Adding Resources - -Resources are Concourse's main approach to managing artifacts. -We need an image, and the tasks directory - -so we'll tell Concourse how to get these things by declaring Resources for them. - -In this case, we'll be downloading the image and the tasks directory from Tanzu Network. -Before we can declare the resources themselves, -we have to teach Concourse to talk to Tanzu Network. -(Many resource types are built in, but this one isn't.) - -Add the following to your pipeline file. -We'll put it above the `jobs` entry. 
- -```yaml -resource_types: -- name: pivnet - type: docker-image - source: - repository: pivotalcf/pivnet-resource - tag: latest-final -resources: -- name: platform-automation - type: pivnet - source: - product_slug: platform-automation - api_token: ((pivnet-refresh-token)) -``` - -The API token is a credential, -which we'll pass via the command-line when setting the pipeline, -so we don't accidentally check it in. - -Grab a refresh token from your Tanzu Network profile -(when logged in, click your username, then `Edit Profile`) -and clicking "Request New Refresh Token." -Then use that token in the following command: - -!!! tip "Keep it secret, keep it safe" - Bash commands that start with a space character - are not saved in your history. - This can be very useful for cases like this, - where you want to pass a secret, - but don't want it saved. - Commands in this guide that contain a secret - start with a space, which can be easy to miss. - -```bash -# note the space before the command - fly -t control-plane set-pipeline \ - -p foundation \ - -c pipeline.yml \ - -v pivnet-refresh-token=your-api-token -``` - -!!! warning Getting Your Tanzu Network Token Expires It - When you get your Tanzu Network token as described above, - any previous Tanzu Network tokens you may have gotten will stop working. - If you're using your Tanzu Network refresh token anywhere, - retrieve it from your existing secret storage rather than getting a new one, - or you'll end up needing to update it everywhere it's used. - -Go back to the Concourse UI and trigger another build. -This time, it should pass. - -Commit time! - -```bash -git add pipeline.yml -git commit -m "Add resources needed for test task" -``` - -We'd rather not pass our Tanzu Network token -every time we need to set the pipeline. -Fortunately, Concourse can integrate -with secret storage services. - -Let's put our API token in Credhub so Concourse can get it. - -First we'll need to login: - -!!! 
info "Backslashes in bash examples" - The following example has been broken across multiple lines - by using backslash characters (`\`) to escape the newlines. - We'll be doing this a lot to keep the examples readable. - When you're typing these out, - you can skip that and just put it all on one line. - -Again, note the space at the start - -{% include ".logging-into-credhub.md" %} - -Then, we can set the credential name -to the path [where Concourse will look for it][concourse-credhub-lookup-rules]: - -```bash -# note the starting space - credhub set \ - --name /concourse/your-team-name/pivnet-refresh-token \ - --type value \ - --value your-credhub-refresh-token -``` - -Now, let's set that pipeline again, -without passing a secret this time. - -```bash -fly -t control-plane set-pipeline \ - -p foundation \ - -c pipeline.yml -``` - -This should succeed, -and the diff Concourse shows you should replace the literal credential -with `((pivnet-refresh-token))`. - -Visit the UI again and re-run the test job; -this should also succeed. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/.logging-into-credhub.md b/docs/how-to-guides/.logging-into-credhub.md deleted file mode 100644 index c23515b3..00000000 --- a/docs/how-to-guides/.logging-into-credhub.md +++ /dev/null @@ -1,22 +0,0 @@ -```bash -# note the starting space - credhub login --server example.com \ - --client-name your-client-id \ - --client-secret your-client-secret -``` - -!!! info "Logging in to credhub" - Depending on your credential type, - you may need to pass `client-id` and `client-secret`, - as we do above, - or `username` and `password`. - We use the `client` approach because that's the credential type - that automation should usually be working with. - Nominally, a username represents a person, - and a client represents a system; - this isn't always exactly how things are in practice. 
- Use whichever type of credential you have in your case. - Note that if you exclude either set of flags, - Credhub will interactively prompt for `username` and `password`, - and hide the characters of your password when you type them. - This method of entry can be better in some situations. diff --git a/docs/how-to-guides/.opsman-config-tabs.md b/docs/how-to-guides/.opsman-config-tabs.md deleted file mode 100644 index e403d91f..00000000 --- a/docs/how-to-guides/.opsman-config-tabs.md +++ /dev/null @@ -1,40 +0,0 @@ -=== "AWS" - ```yaml - --- - pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "ops-manager-aws*.yml" - pivnet-product-slug: ops-manager - product-version-regex: ^2\.5\.\d+$ - ``` -=== "Azure" - ```yaml - --- - pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "ops-manager-azure*.yml" - pivnet-product-slug: ops-manager - product-version-regex: ^2\.5\.\d+$ - ``` -=== "GCP" - ```yaml - --- - pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "ops-manager-gcp*.yml" - pivnet-product-slug: ops-manager - product-version-regex: ^2\.5\.\d+$ - ``` -=== "OpenStack" - ```yaml - --- - pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "ops-manager-openstack*.raw" - pivnet-product-slug: ops-manager - product-version-regex: ^2\.5\.\d+$ - ``` -=== "vSphere" - ```yaml - --- - pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "ops-manager-vsphere*.ova" - pivnet-product-slug: ops-manager - product-version-regex: ^2\.5\.\d+$ - ``` \ No newline at end of file diff --git a/docs/how-to-guides/.download-tas-tabs.md b/docs/how-to-guides/_download-tas-tabs.html.md.erb similarity index 81% rename from docs/how-to-guides/.download-tas-tabs.md rename to docs/how-to-guides/_download-tas-tabs.html.md.erb index 9f53ebd7..5359edef 100644 --- a/docs/how-to-guides/.download-tas-tabs.md +++ b/docs/how-to-guides/_download-tas-tabs.html.md.erb @@ -1,49 +1,54 @@ -=== "AWS" - ```yaml +

AWS

+ +```yaml --- pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TAS + pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TPCF pivnet-product-slug: elastic-runtime product-version-regex: ^2\.9\..*$ stemcell-iaas: aws - ``` +``` + +

Azure

-=== "Azure" - ```yaml +```yaml --- pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TAS + pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TPCF pivnet-product-slug: elastic-runtime product-version-regex: ^2\.9\..*$ stemcell-iaas: azure - ``` +``` + +

GCP

-=== "GCP" - ```yaml +```yaml --- pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TAS + pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TPCF pivnet-product-slug: elastic-runtime product-version-regex: ^2\.9\..*$ stemcell-iaas: google - ``` +``` -=== "OpenStack" - ```yaml +

OpenStack

+ +```yaml --- pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TAS + pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TPCF pivnet-product-slug: elastic-runtime product-version-regex: ^2\.9\..*$ stemcell-iaas: openstack - ``` +``` + +

vSphere

-=== "vSphere" - ```yaml +```yaml --- pivnet-api-token: ((pivnet_token)) - pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TAS + pivnet-file-glob: "*srt*.pivotal" # this guide installs Small Footprint TPCF pivnet-product-slug: elastic-runtime product-version-regex: ^2\.9\..*$ stemcell-iaas: vsphere - ``` +``` diff --git a/docs/how-to-guides/_getting-started.html.md.erb b/docs/how-to-guides/_getting-started.html.md.erb new file mode 100644 index 00000000..62699df3 --- /dev/null +++ b/docs/how-to-guides/_getting-started.html.md.erb @@ -0,0 +1,481 @@ +## Prerequisites + +Over the course of this guide, +you will use Platform Automation Toolkit +to create a [pipeline](https://concourse-ci.org/pipelines.html) +using [Concourse](https://concourse-ci.org/). + +You need: + +1. For upgrade only: A running Tanzu Operations Manager VM that you would like to upgrade +2. Credentials for an IaaS that Tanzu Operations Manager is compatible with + - It doesn't matter what IaaS you use for Tanzu Operations Manager, + as long as your Concourse can connect to it. + Pipelines built with Platform Automation Toolkit can be platform-agnostic. +3. A Concourse instance + with access to a CredHub instance + and to the Internet +4. A GitHub account +5. Read/write credentials and bucket name for an S3 bucket +6. An account on the [Broadcom Support portal](https://support.broadcom.com/group/ecx/downloads) +7. A MacOS workstation with: + - a text editor of your choice + - a terminal emulator of your choice + - a browser that works with Concourse, like Firefox or Chrome + - `git`installed + - Docker installed + +It will be very helpful to have a basic familiarity with the following. 
If you don't have basic familiarity with all these things,
+you will find some basics explained here, along with links to resources to learn more:
+
+- the bash terminal
+- [git](https://git-scm.com/)
+- [YAML](https://learnxinyminutes.com/docs/yaml/)
+- [Concourse](https://concourse-ci.org/)
+
+

+While this guide uses GitHub to provide a git remote, +and an S3 bucket as a blobstore, +Platform Automation Toolkit supports arbitrary git providers +and S3-compatible blobstores. +
+Specific examples are described in some detail, but +if you follow along with different providers +some details may be different. +Also see Setting up S3 for file storage. +
+Similarly, in this guide, MacOS is assumed, but +Linux should work well, too. +Keep in mind that there might be differences in the paths that +you will need to figure out.

+ +## Creating a Concourse pipeline + +Platform Automation Toolkit's tasks and image are meant to be used in a Concourse pipeline. + +Using your bash command-line client, +create a directory to keep your pipeline files in, and `cd` into it. + +```bash +mkdir your-repo-name +cd !$ +``` + +This repo name should relate to your situation +and be specific enough to be navigable from your local workstation. + +

+!$ is a bash shortcut. +Pronounced "bang, dollar-sign," +it means "use the last argument from the most recent command." +In this case, that's the directory you just created.

+ +### Gather variables to use in the pipeline (for upgrade only) +If you are upgrading, continue with the following. If not, skip to [Creating a pipeline](#creating-a-pipeline). + +Before getting started with the pipeline, +gather some variables in a file that +you can use throughout your pipeline. + +Open your text editor and create `vars.yml`. +Here's what it should look like to start. You can add more variables as you go: + +```yaml +platform-automation-bucket: your-bucket-name +credhub-server: https://your-credhub.example.com +opsman-url: https://pcf.foundation.example.com +``` + +

+This example assumes that that you are using DNS and host names. +You can use IP addresses for all these resources instead, +but you still need to provide the information as a URL, +for example: https://120.121.123.124.

+ +### Creating a pipeline + +Create a file called `pipeline.yml`. + +The examples in this guide use `pipeline.yml`, but +you might create multiple pipelines over time. +If there's a more sensible name for the pipeline you're working on, +feel free to use that instead. + +Start the file as shown here. This is [YAML](https://learnxinyminutes.com/docs/yaml/) for "the start of the document." It's optional, but traditional: + +```yaml + +--- +``` + +Now you have a valid YAML pipeline file. + +### Getting fly + +First, try to set your new YAML file as a pipeline with [`fly`](https://concourse-ci.org/fly.html), +the Concourse command-line Interface (CLI). + +To check if you have `fly` installed: + +```bash +fly -v +``` + +If it returns a version number, you're ready for the next steps. +Skip ahead to [Setting the pipeline](#setting-the-pipeline) + +If it says something like `-bash: fly: command not found`, +you need to get `fly`. + +Navigate to the address for your Concourse instance in a web browser. +At this point, you don't need to be signed in. +If there are no public pipelines, you should see something like this: + +![Get Fly](../img/concourse-fly-download.png) + +If there are public pipelines, +or if you're signed in and there are pipelines you can see, +you'll see something similar in the lower-right hand corner. + +Click the icon for your OS and save the file, move +(`mv`) the resulting file to somewhere in your `$PATH`, +and use `chmod` to make it executable: + +

+About command-line examples: +In some cases, you can copy-paste the examples directly into your terminal. +Some of them won't work that way, +or even if they did, would require you to edit them to replace our example values +with your actual values. +Best practice is to type all of the bash examples by hand, +substituting values, if necessary, as you go. +Don't forget that you can often hit the tab key +to auto-complete the names of files that already exist; +it makes all that typing just a little easier, +and serves as a sort of command-line autocorrect.

+ +Type the following into your terminal to get `fly`. + +```bash +mv ~/Downloads/fly /usr/local/bin/fly +chmod +x !$ +``` + +This means that you downloaded the `fly` binary, +and moved it into the bash PATH, +which is where bash looks for things to execute +when you type a command. +Then you added permissions that allow it to be executed (`+x`). +Now, the CLI is installed, you won't have to do it again, +because `fly` has the ability to update itself, +(which is be described in more detail is a later section). + +### Setting the pipeline + +Now set your pipeline with `fly`, the Concourse CLI. + +`fly` keeps a list of Concourses it knows how to talk to. +To find out if the Concourse you need is already on the list, type: + +```bash +fly targets +``` + +If you see the address of the Concourse you want to use in the list, +note its name, and use it in the login command. The examples in this book use the Concourse +name `control-plane`. + +```bash +fly -t control-plane login +``` + +If you don't see the Concourse you need, you can add it with the `-c` (`--concourse-url`)flag: + +```bash +fly -t control-plane login -c https://your-concourse.example.com +``` + +You should see a login link you can click +to complete login from your browser. + +

+The -t flag sets the name when used with login and -c. +In the future, you can leave out the -c argument. +
+If you ever want to know what a short flag stands for, +you can run the command with -h (--help) at the end.

+ +Time to set the pipeline. +The example here use the name "foundation" for this pipeline, +but if your foundation has a name, use that instead. + +```bash +fly -t control-plane set-pipeline -p foundation -c pipeline.yml +``` + +It should say `no changes to apply`, +which is expected, since the `pipeline.yml` file is still empty. + +

+If fly says something about a "version discrepancy," +"significant" or otherwise, run fly sync and try again. +fly sync automatically updates the CLI +with the version that matches the Concourse you're targeting.

+ +### Your first job + +Before running your pipeline the first time, turn your directory into a git repository. + +This allows to reverting edits to your pipeline as needed. +This is one of many reasons you should keep your pipeline under version control. + +#### But first, git init + +This section describes a step-by-step approach for getting set up with git. + +For an example of the repository file structure +for single and multiple foundation systems, +see Why use Git and GitHub? + +1. Run `git init`. `git` should come back with information about the commit you just created: + + ```bash + git init + git commit --allow-empty -m "Empty initial commit" + ``` + + If this gives you a config error instead, + you might need to configure `git` first. + See [First-Time Git Setup](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) + to complete the initial setup. + When you have finished going through the steps in this guide, try again. + +1. Now add your `pipeline.yml`. + + ```bash + git add pipeline.yml + git commit -m "Add pipeline" + ``` + + If you are performing an upgrade, use this instead: + + ```bash + git add pipeline.yml vars.yml + git commit -m "Add pipeline and starter vars" + ``` + +1. Check that everything is tidy: + + ```bash + git status + ``` + + `git` should return `nothing to commit, working tree clean`. + +When this is done, you can safely make changes. + +

+git commits are the basic unit of code history. +Making frequent, small, commits with good commit messages +makes it much easier to figure out why things are the way they are, +and to return to the way things were in simpler, better times. +Writing short commit messages that capture the intent of the change +really does make the pipeline history much more legible, +both to future-you, +and to current and future teammates and collaborators.

+ +#### The test task + +Platform Automation Toolkit comes with a [`test`](../tasks.html#test) task you +can use to validate that it's been installed correctly. + +1. Add the following to your `pipeline.yml`, starting on the line after the `---`: + + ```yaml + jobs: + - name: test + plan: + - task: test + image: platform-automation-image + file: platform-automation-tasks/tasks/test.yml + ``` + +1. Try to set the pipeline now. + + ```bash + fly -t control-plane set-pipeline -p foundation -c pipeline.yml + ``` + + Now you should be able to see your pipeline + in the Concourse UI. + It starts in the paused state, so click the play button to unpause it. + Then click in to the gray box for the `test` job, + and click the plus (**+**) button to schedule a build. + + It should return an error immediately, with `unknown artifact source: platform-automation-tasks`. + This is because there isn't a source for the task file yet. + + This preparation has resulted in a pipeline code that Concourse accepts. + +1. Before starting the next step, make a commit: + + ```bash + git add pipeline.yml + git commit -m "Add (nonfunctional) test task" + ``` + +1. Get the inputs you need by adding `get` steps to the plan +before the task, as shown here: + + ```yaml hl_lines="4-13" + jobs: + - name: test + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - task: test + image: platform-automation-image + file: platform-automation-tasks/tasks/test.yml + ``` + +

+ There is a smaller vSphere container image available. + To use it instead of the general purpose image, + you can use this glob to get the image: +
+ + - get: platform-automation-image + resource: platform-automation + params: + globs: ["vsphere-platform-automation-image*.tar.gz"] + unpack: true + +

+ +1. Next, you might try to `fly set` this new pipeline. At this stage, you will see that it is not ready yet, +and `fly` will return a message about invalid resources. + + This is because you need to make the `image` and `file` available, so + you need to set up some Resources. + +#### Adding resources + +Resources are Concourse's main approach to managing artifacts. +You need an image and the tasks directory, +so you need to tell Concourse how to get these things by declaring Resources for them. + +In this case, you will download the image and the tasks directory from the [Broadcom Support portal](https://support.broadcom.com/group/ecx/downloads). +Before you can declare the resources themselves, +you must teach Concourse to talk to the Broadcom Support portal. +(Many resource types are built in, but this one isn't.) + +1. Add the following to your pipeline file, above the `jobs` entry. + + ```yaml + resource_types: + - name: pivnet + type: docker-image + source: + repository: pivotalcf/pivnet-resource + tag: latest-final + resources: + - name: platform-automation + type: pivnet + source: + product_slug: platform-automation + api_token: ((pivnet-refresh-token)) + ``` + + The API token is a credential, + which you pass in using the command-line when setting the pipeline, + You don't want to accidentally check it in. + +

+ Bash commands that start with a space character + are not saved in your history. + This can be very useful for cases like this, + where you want to pass a secret, + but you don't want it saved. + Commands in this guide that contain a secret + start with a space, which can be easy to miss.

+ +2. Get a refresh token from your Broadcom Support profile +(when logged in, click your user name, then **Edit Profile**) +and click **Request New Refresh Token**.) + Then use that token in the following command: + + ```bash + # note the space before the command + fly -t control-plane set-pipeline \ + -p foundation \ + -c pipeline.yml \ + -v pivnet-refresh-token=your-api-token + ``` + +

+ When you get your Broadcom Support token as described above, + any previous Broadcom Support tokens you have stop working. + If you're using your Broadcom Support refresh token anywhere, + retrieve it from your existing secret storage rather than getting a new one, + or you'll end up needing to update it everywhere it's used.

+ +1. Go back to the Concourse UI and trigger another build. This time, it should pass. + +2. Now it's time to commit. + + ```bash + git add pipeline.yml + git commit -m "Add resources needed for test task" + ``` + +3. It's better not to pass the Broadcom Support token +every time you need to set the pipeline. +Fortunately, Concourse can integrate +with secret storage services, like CredHub. In this step, put the API token in CredHub so Concourse can get it. + +

+ Backslashes in bash examples: + The following example has been broken across multiple lines + by using backslash characters (\) to escape the newlines. + The backslash is used in here to keep the examples readable. + When you're typing these out, + you can skip the backslashes and put it all on one line.

+ +1. First, log in. Again, note the space at the start. + + <%= partial "logging-into-credhub" %> + +2. Next, set the credential name +to the path [where Concourse will look for it](https://concourse-ci.org/credhub-credential-manager.html#credential-lookup-rules): + + ```bash + # note the starting space + credhub set \ + --name /concourse/your-team-name/pivnet-refresh-token \ + --type value \ + --value your-credhub-refresh-token + ``` + +1. Now, set the pipeline again, +without passing a secret this time. + + ```bash + fly -t control-plane set-pipeline \ + -p foundation \ + -c pipeline.yml + ``` + + This should succeed, + and the diff Concourse shows you should replace the literal credential + with `((pivnet-refresh-token))`. + +1. Go to the UI again and re-run the test job; +this should also succeed. diff --git a/docs/how-to-guides/_logging-into-credhub.html.md.erb b/docs/how-to-guides/_logging-into-credhub.html.md.erb new file mode 100644 index 00000000..c7dbce33 --- /dev/null +++ b/docs/how-to-guides/_logging-into-credhub.html.md.erb @@ -0,0 +1,21 @@ + ```bash + # note the starting space + credhub login --server example.com \ + --client-name your-client-id \ + --client-secret your-client-secret + ``` + +

+ Depending on your credential type, + you may need to pass client-id and client-secret, + as we do above, or username and password. + We use the client approach because that's the credential type + that automation should usually be working with. + Nominally, a username represents a person, + and a client represents a system; + this isn't always exactly how things are in practice. + Use whichever type of credential you have in your case. + Note that if you exclude either set of flags, + CredHub will interactively prompt for username and password, + and hide the characters of your password when you type them. + This method of entry can be better in some situations.

diff --git a/docs/how-to-guides/_opsman-config-tabs.html.md.erb b/docs/how-to-guides/_opsman-config-tabs.html.md.erb new file mode 100644 index 00000000..53999956 --- /dev/null +++ b/docs/how-to-guides/_opsman-config-tabs.html.md.erb @@ -0,0 +1,46 @@ +

AWS

+ +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-file-glob: "ops-manager-aws*.yml" +pivnet-product-slug: ops-manager +product-version-regex: ^2\.5\.\d+$ +``` +

Azure

+ +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-file-glob: "ops-manager-azure*.yml" +pivnet-product-slug: ops-manager +product-version-regex: ^2\.5\.\d+$ +``` +

GCP

+ +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-file-glob: "ops-manager-gcp*.yml" +pivnet-product-slug: ops-manager +product-version-regex: ^2\.5\.\d+$ +``` +

OpenStack

+ +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-file-glob: "ops-manager-openstack*.raw" +pivnet-product-slug: ops-manager +product-version-regex: ^2\.5\.\d+$ +``` +

vSphere

+ +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-file-glob: "ops-manager-vsphere*.ova" +pivnet-product-slug: ops-manager +product-version-regex: ^2\.5\.\d+$ +``` +

diff --git a/docs/how-to-guides/.paths-and-pipeline-names.md b/docs/how-to-guides/_paths-and-pipeline-names.html.md.erb similarity index 52% rename from docs/how-to-guides/.paths-and-pipeline-names.md rename to docs/how-to-guides/_paths-and-pipeline-names.html.md.erb index 97b0acdd..045d1487 100644 --- a/docs/how-to-guides/.paths-and-pipeline-names.md +++ b/docs/how-to-guides/_paths-and-pipeline-names.html.md.erb @@ -1,15 +1,13 @@ -!!! info "Credhub paths and pipeline names" - - Notice that we've added an element to the cred paths; - now we're using the foundation name. - - If you look at [Concourse's lookup rules,][concourse-credhub-lookup-rules] +

+ Notice the additional element to the cred paths; + the foundation name. +
+ If you look at Concourse lookup rules, you'll see that it searches the pipeline-specific path before the team path. Since our pipeline is named for the foundation it's used to manage, we can use this to scope access to our foundation-specific information to just this pipeline. - +
By contrast, the Tanzu Network token may be valuable across several pipelines - (and associated foundations), - so we scoped that to our team. \ No newline at end of file + (and associated foundations), so we scoped that to our team.

diff --git a/docs/how-to-guides/adding-a-product.html.md.erb b/docs/how-to-guides/adding-a-product.html.md.erb new file mode 100644 index 00000000..d905e369 --- /dev/null +++ b/docs/how-to-guides/adding-a-product.html.md.erb @@ -0,0 +1,843 @@ +# Extending a pipeline to install a product + +This topic will teach you how to add a product to an existing pipeline. +This includes downloading the product from the Broadcom Support portal, +extracting the configuration, +and installing the configured product. + +## Prerequisites + +1. A pipeline, such as one created in [Installing Tanzu Operations Manager](./installing-opsman.html) + or [Upgrading an existing Tanzu Operations Manager](./upgrade-existing-opsman.html). +2. A fully configured Tanzu Operations Manager and Director. See [Creating a director config file](./creating-a-director-config-file.html). +3. The Platform Automation Toolkit Docker Image imported and ready to run. See [Running commands locally](./running-commands-locally.html). +4. A glob pattern uniquely matching one product file on the Broadcom Support portal. + +### Assumptions about your existing pipeline + +This guide assumes that you are working +from one of the pipelines created in [Installing Tanzu Operations Manager](./installing-opsman.html) or [Upgrading an existing Tanzu Operations Manager](./upgrade-existing-opsman.html), +but you don't have to have exactly that pipeline. +If your pipeline is different, though, +you may run into trouble with some of the assumptions made here: + +- Resource declarations for `config` and `platform-automation`. +- A pivnet token stored in CredHub as a credential named `pivnet_token`. +- A previous job responsible for deploying the director, + called `apply-director-changes`. +- You have an `env.yml` based on the instructions in [Configuring Env](./configuring-env.html). This file exists in the `configuration` resource. +- You have a `fly` target named `control-plane`, with an existing pipeline called `foundation`. 
+- You have a source control repo that contains the `foundation` pipeline's `pipeline.yml`. + +You should be able to use the pipeline YAML in this document with any pipeline, +as long as you make sure the names in the assumptions list match what's in your pipeline, +either by changing the example YAML or your pipeline. + +## Download, upload, and stage product to Tanzu Operations Manager + +The instructions and example in the following add the [VMware Tanzu Platform for Cloud Foundry](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Tanzu%20Platform%20for%20Cloud%20Foundry) product. + +### Download + +Before setting the pipeline, create a config file for [`download-product`](../tasks.html#download-product) +to download Tanzu Platform for Cloud Foundry from the Broadcom Support portal. + +Create a `download-tas.yml` file for the IaaS you are using. + +<%= partial "download-tas-tabs" %> + +1. Add and commit this file to the same directory as the previous guides. + This file should be accessible from the `configuration` resource. + + ```bash + git add download-tas.yml + git commit -m "Add download-tas file for foundation" + git push + ``` + +2. Now that you have a config file, +you can add a new `download-upload-and-stage-tas` job in your `pipeline.yml` file. 
+ + ```yaml hl_lines="3-32" + jobs: # Do not duplicate this if it already exists in your pipeline.yml, + # just add the following lines to the jobs section + - name: download-upload-and-stage-tas + serial: true + plan: + - aggregate: + - get: platform-automation-image + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: config + params: + CONFIG_FILE: download-tas.yml + output_mapping: + downloaded-product: tas-product + downloaded-stemcell: tas-stemcell + ``` + +1. Commit your changes. + + ```bash + git add pipeline.yml + git commit -m 'download TPCF and its stemcell' + ``` + +2. Now, set the pipeline + + ```bash + fly -t control-plane set-pipeline -p foundation -c pipeline.yml + ``` + +3. If the pipeline sets without errors, run a `git push` of the config. + +4. If fly set-pipeline returns an error, +fix any and all errors until the pipeline can be set. +When the pipeline can be set properly, run: + + ``` + git add pipeline.yml + git commit --amend --no-edit + git push + ``` + +

+ Testing your pipeline:
+ You will generally want to try things out right away to see if they're working.
+ However, in this case, if you have a very slow internet connection and/or multiple Concourse workers,
+ you might want to hold off until the job is doing more,
+ so that if it works, you don't have to wait for the download again.

+ +### Upload and stage + +1. Now that you have a product downloaded and (potentially) cached on a Concourse worker, +upload and stage the new product to Tanzu Operations Manager. + + ```yaml hl_lines="32-45" + jobs: + - name: download-upload-and-stage-tas + serial: true + plan: + - aggregate: + - get: platform-automation-image + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: config + params: + CONFIG_FILE: download-tas.yml + output_mapping: + downloaded-product: tas-product + downloaded-stemcell: tas-stemcell + - task: upload-tas-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: config + stemcell: tas-stemcell + params: + ENV_FILE: env.yml + - task: upload-and-stage-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-and-stage-product.yml + input_mapping: + product: tas-product + env: config + ``` + +1. Re-set the pipeline. + + ```bash + fly -t control-plane set-pipeline -p foundation -c pipeline.yml + ``` + +1. When this finishes successfully, make a commit and push the changes. + + ```bash + git add pipeline.yml + git commit -m 'upload tas and stemcell to Ops Manager' + git push + ``` + +## Product configuration + +Before automating the configuration and installation of the product, +add a config file. 
+The simplest way to do this is to choose your config options in the Tanzu Operations Manager UI, +and then pull its resulting configuration. + +

+Advanced Tile Config Option: +For an alternative that generates the configuration +from the product file, using ops files to select options, +see Config template. +

+
+### Pulling configuration from Tanzu Operations Manager
+
+Configure the product _manually_ according to the product's installation instructions.
+Use the installation instructions in the [VMware Tanzu Platform for Cloud Foundry documentation](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-platform-for-cloud-foundry/10-0/tpcf/toc-installing-index.html).
+
+After the product is fully configured, apply the changes (**Apply Changes**) in the Tanzu Operations Manager UI,
+and then continue this guide.
+

+If you do not click Apply Changes,
+Tanzu Operations Manager cannot generate credentials.
+You can still go through this process without an initial Apply Changes,
+but you will be unable to use om staged-config with --include-credentials,
+and may have an incomplete configuration at the end of this process.

+ +[`om`](https://github.com/pivotal-cf/om) has a command called [staged-config](../tasks.html#staged-config). +It is used to extract staged product +configuration from the Tanzu Operations Manager UI. +`om` requires a `env.yml`, which is available. It was used in the `upload-and-stage` task. + +Most products will contain the following top-level keys: + +- network-properties +- product-properties +- resource-config + +The command can be run directly using Docker: +1. Download the image to our local workstation. +1. Import the image into Docker. +2. Run `staged-config` for the [Tanzu Platform for Cloud Foundry](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-platform-for-cloud-foundry/10-0/tpcf/toc-installing-index.html) product. +For more information, see [Running commands locally](./running-commands-locally.html). + +To pull the configuration from Tanzu Operations Manager: + +1. Download the image from the [Broadcom Support portal](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Platform%20Automation%20Toolkit). + +2. Import the image. + + ```bash + export ENV_FILE=env.yml + docker import ${PLATFORM_AUTOMATION_IMAGE_TGZ} platform-automation-image + ``` + +3. Run `om staged-products` to find the name of the product in Tanzu Operations Manager. + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + om --env ${ENV_FILE} staged-products + ``` + + The result should be a table that looks like the following + ```text + +---------------------------+-----------------+ + | NAME | VERSION | + +---------------------------+-----------------+ + | cf | | + | p-bosh | | + +---------------------------+-----------------+ + ``` + + `p-bosh` is the name of the director. 
+ As `cf` is the only other product on our Tanzu Operations Manager, + we can safely assume that this is the product name for [Tanzu Platform for Cloud Foundry](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Tanzu%20Platform%20for%20Cloud%20Foundry). + +4. Using the product name `cf`, +extract the current configuration from Tanzu Operations Manager. + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + om --env ${ENV_FILE} staged-config --include-credentials --product-name cf > tas-config.yml + ``` + +Now you have a configuration file for our tile ready to back up (almost). +There are a few more steps required before you are ready to commit. + +#### Parameterizing the config + +Look through your `tas-config.yml` for any sensitive values. +These values should be `((parameterized))` +and saved off in a secrets store (in this example, we use CredHub). + +1. Log in to CredHub, if you are not already logged in. +Be sure to note the space at the beginning of the line. +This will ensure your valuable secrets are not saved in terminal history. + + <%= partial "logging-into-credhub" %> + + The example list of some sensitive values from our `tas-config.yml` are as follows, + note that this is intentionally incomplete. + ```yaml + product-properties: + .properties.cloud_controller.encrypt_key: + value: + secret: my-super-secure-secret + .properties.networking_poe_ssl_certs: + value: + - certificate: + cert_pem: |- + -----BEGIN CERTIFICATE----- + my-cert + -----END CERTIFICATE----- + private_key_pem: |- + -----BEGIN RSA PRIVATE KEY----- + my-private-key + -----END RSA PRIVATE KEY----- + name: certificate + ``` + +1. Start with the Cloud Controller encrypt key because +this is a value that you might want to rotate at some point. +Store it as a `password` type in CredHub. 
+ + ```bash + # note the starting space + credhub set \ + --name /concourse/your-team-name/cloud_controller_encrypt_key \ + --type password \ + --password my-super-secure-secret + ``` + +1. To validate that you have set this correctly, run: + + ```bash + # no need for an extra space + credhub get --name /concourse/your-team-name/cloud_controller_encrypt_key + ``` + + Expect an output like this: + + ```text + id: + name: /concourse/your-team-name/cloud_controller_encrypt_key + type: password + value: my-super-secure-secret + version_created_at: "" + ``` + +1. In preparation for storing the Networking POE certs +as a `certificate` type in CredHub, +save the certificate and private key +as plain text files. +In this example, these files are named `poe-cert.txt` and `poe-private-key.txt`. +There should be no formatting or indentation in these files, only new lines. + + ```bash + # note the starting space + credhub set \ + --name /concourse/your-team-name/networking_poe_ssl_certs \ + --type rsa \ + --public poe-cert.txt \ + --private poe-private-key.txt + ``` + +1. Validate that these are set correctly. + + ```bash + # no need for an extra space + credhub get --name /concourse/your-team-name/networking_poe_ssl_certs + ``` + + The output should look like this: + + ```text + id: + name: /concourse/your-team-name/networking_poe_ssl_certs + type: rsa + value: + private_key: | + -----BEGIN RSA PRIVATE KEY----- + my-private-key + -----END RSA PRIVATE KEY----- + public_key: | + -----BEGIN CERTIFICATE----- + my-cert + -----END CERTIFICATE----- + version_created_at: "" + ``` + +

+ Remove credentials from disk: + Once you have validated that the certificates are set correctly in CredHub, + remember to delete poe-cert.txt and poe-private-key.txt from your working directory. + This will prevent a potential security leak + or an accidental commit of those credentials.

+ +1. Repeat this process for all sensitive values in your `tas-config.yml`. + +1. After this is complete, you can remove those secrets from `tas-config.yml` +and replace them with `((parameterized-values))`. +The parameterized value name should match the name in CredHub. +For this example, it looks like this: + + ```yaml + product-properties: + .properties.cloud_controller.encrypt_key: + value: + secret: ((cloud_controller_encrypt_key)) + .properties.networking_poe_ssl_certs: + value: + - certificate: + cert_pem: ((networking_poe_ssl_certs.public_key)) + private_key_pem: ((networking_poe_ssl_certs.private_key)) + name: certificate + ``` + +1. When this is ready; that is, `tas-config.yml` is parameterized to your liking, +commit the config file. + + ```bash + git add tas-config.yml + git commit -m "Add tas-config file for foundation" + git push + ``` + +## Configure and apply + +Now you can configure the product and apply changes. + +1. First, update the pipeline +to have a configure-product step. 
+ + ```yaml hl_lines="46-76" + jobs: + - name: download-upload-and-stage-tas + serial: true + plan: + - aggregate: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: config + params: + CONFIG_FILE: download-tas.yml + output_mapping: + downloaded-product: tas-product + downloaded-stemcell: tas-stemcell + - task: upload-tas-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: config + stemcell: tas-stemcell + params: + ENV_FILE: env/env.yml + - task: upload-and-stage-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/stage-product.yml + input_mapping: + product: tas-product + env: config + - name: configure-tas + serial: true + plan: + - aggregate: + - get: platform-automation-image + passed: [download-upload-and-stage-tas] + trigger: true + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + passed: [download-upload-and-stage-tas] + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: configure-tas + image: 
platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: config + env: config + params: + CONFIG_FILE: tas-config.yml + ``` + + This new job will configure the Tanzu Platform for Cloud Foundry product + with the config file we previously created. + +2. Add an `apply-changes` job +so that these changes will be applied by the Tanzu Operations Manager. + + ```yaml hl_lines="31-56" + - name: configure-tas + serial: true + plan: + - aggregate: + - get: platform-automation-image + trigger: true + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + passed: [download-upload-and-stage-tas] + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: configure-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: config + env: config + params: + CONFIG_FILE: tas-config.yml + - name: apply-changes + serial: true + plan: + - aggregate: + - get: platform-automation-image + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + passed: [configure-tas] + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: apply-changes + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: config + ``` + +

+ Adding multiple products: + When adding multiple products, you can add the configure jobs as passed constraints + to the apply-changes job so that they all are applied at once. + Tanzu Operations Manager will handle any inter-product dependency ordering. + This will speed up your apply changes + when compared with running apply changes for each product separately. +
+ Example: + passed: [configure-tas, configure-tas-windows, configure-healthwatch] +

+
+1. Set the pipeline one final time,
+run the job, and confirm that it passes.
+
+    ```bash
+    fly -t control-plane set-pipeline -p foundation -c pipeline.yml
+    ```
+
+1. Commit the final changes to your repository.
+
+    ```bash
+    git add pipeline.yml
+    git commit -m "configure-tas and apply-changes"
+    git push
+    ```
+
+You have now successfully added a product to your automation pipeline.
+
+## Advanced concepts
+### Config template
+
+An alternative to the staged-config workflow
+outlined in these examples is `config-template`.
+
+`config-template` is an `om` command that creates a base config file with optional ops files
+from a given tile or pivnet slug.
+
+This section assumes that you are adding
+[Tanzu Platform for Cloud Foundry](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Tanzu%20Platform%20for%20Cloud%20Foundry),
+as in the procedure above.
+
+#### Generate the config template directory
+
+```bash
+# note the leading space
+ export PIVNET_API_TOKEN='your-vmware-tanzu-network-api-token'
+```
+
+```bash
+docker run -it -v $HOME/configs:/configs platform-automation-image \
+om config-template \
+  --output-directory /configs/ \
+  --pivnet-api-token "${PIVNET_API_TOKEN}" \
+  --pivnet-product-slug elastic-runtime \
+  --product-version '2.5.0' \
+  --product-file-glob 'cf*.pivotal' # Only necessary if the product has multiple .pivotal files
+```
+
+This series of commands creates or updates a directory at `$HOME/configs/cf/2.5.0/`.
+
+`cd` into the directory to get started creating your config.
+
+#### Interpolate a config
+
+In the directory, you'll see a `product.yml` file.
+This is the template for the product configuration you're about to build.
+Open it in an editor of your choice.
+Get familiar with the file's contents.
+The values are variables intended to be interpolated from other sources,
+designated with the `(())` syntax.
+
+You can find the value for any property with a default in the `product-default-vars.yml` file.
+This file serves as a good example of a variable source. + +1. Create a vars file of your own for variables without default values. +For the base template, you can get a list of required variables by running: + + ```bash + docker run -it -v $HOME/configs:/configs platform-automation-image \ + om interpolate \ + --config product.yml \ + -l product-default-vars.yml \ + -l resource-vars.yml \ + -l errand-vars.yml + ``` + +1. Put these vars in a file and give them the appropriate values. +After you've included all the variables, +the output will be the finished template. +The rest of this guide refers to these vars as `required-vars.yml`. + +There may be situations that call for splitting your vars across multiple files. +This can be useful if there are vars that need to be interpolated when you apply the configuration, +rather than when you create the final template. +You might consider creating a separate vars file for each of the following cases: + +- credentials (These vars can then be persisted separately/securely. See [Using a secrets store to store credentials](../concepts/secrets-handling.html)) +- foundation-specific variables when using the same template for multiple foundations + +When creating your final template +using `om interpolate`, you can use the `--skip-missing` flag to leave such vars to be rendered later. + +If you're having trouble figuring out what the values should be, +here are some approaches you can use: + +- Look in the template where the variable appears for some additional context of its value. +- Look at the tile's online documentation +- Upload the tile to a Tanzu Operations Manager + and visit the tile in the Tanzu Operations Manager UI to see if that provides any hints. + + If you are still struggling, inspect the HTML of the Tanzu Operations Manager web page + to help you map the value names to the associated UI elements. + +

+When using the Tanzu Operations Manager docs and UI, +be aware that the field names in the UI do not necessarily map directly to property names.

+ +#### Optional features + +The above process will get you a default installation, +with no optional features or variables, +that is entirely deployed in a single Availability Zone (AZ). + +To provide non-required variables, +use multiple AZs, +or make non-default selections for some options, +use some of the ops files in one of the following four directories: + + + + + + + + + + + + + + + + + + +
featuresAllow the enabling of selectors for a product; for example, enabling/disabling of an s3 bucket
networkContains options for enabling 2-3 availability zones for network configuration
optionalContains optional properties without defaults. For optional values that can be provided more than once, there's an ops file for each param count.
resourceContains configuration that can be applied to resource configuration; for example, BOSH VM extensions
+
+For more information on BOSH VM Extensions, see [Creating a director config file](./creating-a-director-config-file.html#vm-extensions).
+
+To use an ops file, add `-o`
+with the path of the ops file you want to use to your `interpolate` command.
+
+So, to enable TCP routing in Tanzu Platform for Cloud Foundry, add `-o features/tcp_routing-enable.yml`.
+For the rest of this guide, the vars for this feature
+are referred to as `feature-vars.yml`.
+If you run your complete command, you should again get a list of any newly-required variables.
+
+```bash
+docker run -it -v $HOME/configs:/configs platform-automation-image \
+om interpolate \
+  --config product.yml \
+  -l product-default-vars.yml \
+  -l resource-vars.yml \
+  -l required-vars.yml \
+  -o features/tcp_routing-enable.yml \
+  -l feature-vars.yml \
+  -l errand-vars.yml
+```
+
+#### Finalize your configuration
+
+After selecting your ops files and creating your vars files,
+decide which vars you want in the template
+and which you want to have interpolated later.
+
+Create a final template and write it to a file,
+using only the vars you want in the template,
+and using `--skip-missing` to allow the rest to remain as variables.
+
+```bash
+docker run -it -v $HOME/configs:/configs platform-automation-image \
+om interpolate \
+  --config product.yml \
+  -l product-default-vars.yml \
+  -l resource-vars.yml \
+  -l required-vars.yml \
+  -o features/tcp_routing-enable.yml \
+  -l feature-vars.yml \
+  -l errand-vars.yml \
+  --skip-missing \
+  > pas-config-template.yml
+```
+
+You can check the resulting configuration into a git repo.
+For vars that do not include credentials, you can check those vars files in, too.
+Handle vars that are secret more carefully. See [Using a secrets store to store credentials](../concepts/secrets-handling.html).
+
+You can then delete the config template directory.
+ +## Using ops files for multi-foundation + +There are two recommended ways to support multiple foundation workflows: + +* Using [secrets management](../concepts/secrets-handling.html#multi-foundation-secrets-handling) +* Using ops files + +This section explains how to support multiple foundations using ops files. + +Starting with an incomplete [Tanzu Platform for Cloud Foundry](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Tanzu%20Platform%20for%20Cloud%20Foundry) config from **vSphere** as an example: + +<%= partial "cf-partial-config" %> + +For a single foundation deployment, leaving values such as +`".cloud_controller.apps_domain"` as they are works fine. For multiple +foundations, this value will be different for each deployed foundation. Other values, +such as `.cloud_controller.encrypt_key`, have a secret that +already has a placeholder from `om`. If different foundations have different +load requirements, the values in `resource-config` can also be edited using +[ops files](https://bosh.io/docs/cli-ops-files/). + +1. Using the earlier example, fill in the existing placeholder for +`cloud_controller.apps_domain` in the first foundation. + + ```yaml + # replace-domain-ops-file.yml + - type: replace + path: /product-properties/.cloud_controller.apps_domain/value? + value: unique.foundation.one.domain + ``` + +1. To test that the ops file works in your `base.yml`, do this locally using `bosh int`: + + ```bash + bosh int base.yml -o replace-domain.yml + ``` + +The following code returns `base.yml` with the replaced (interpolated) values: + +<%= partial "cf-partial-config-domain-interpolated" %> + +Anything that needs to be different per deployment can be replaced using ops files as long as the `path:` is correct. + +### Additional notes + +Upgrading products to new patch versions: + +* Configuration settings should not differ between successive patch versions in the same minor version line. 
+ Underlying properties or property names may change, + but the tile's upgrade process automatically translates properties to the new fields and values. +* VMware cannot guarantee the functionality of upgrade scripts in third-party products. + +Replicating configuration settings from one product to the same product on a different foundation: + +* Because properties and property names can change between patch versions of a product, + you can only safely apply configuration settings across products if their versions exactly match. diff --git a/docs/how-to-guides/adding-a-product.md b/docs/how-to-guides/adding-a-product.md deleted file mode 100644 index 37d904b0..00000000 --- a/docs/how-to-guides/adding-a-product.md +++ /dev/null @@ -1,829 +0,0 @@ -# Extending a Pipeline to Install a Product - -This how-to-guide will teach you how to add a product to an existing pipeline. -This includes downloading the product from Pivnet, -extracting configuration, -and installing the configured product. -If you don't already have an Ops Manager and deployed Director, -check out [Installing Ops Manager][install-how-to] and -[Deploying the Director][director-configuration] respectively. - -## Prerequisites -1. A pipeline, such as one created in [Installing Ops Manager][install-how-to] - or [Upgrading an Existing Ops Manager][upgrade-how-to]. -1. A fully configured Ops Manager and Director. -1. The Platform Automation Toolkit Docker Image [imported and ready to run][running-commands-locally]. -1. A glob pattern uniquely matching one product file on Tanzu Network. - -### Assumptions About Your Existing Pipeline -This guide assumes you're working -from one of the pipelines created in previous guides, -but you don't _have_ to have exactly that pipeline. -If your pipeline is different, though, -you may run into trouble with some of our assumptions. - -We assume: - -- Resource declarations for - `config` and `platform-automation`. 
-- A pivnet token stored in Credhub as a credential named `pivnet_token`. -- A previous job responsible for deploying the director - called `apply-director-changes`. -- You have created an `env.yml` from the [Configuring Env][generating-env-file] - how-to guide. This file exists in the `configuration` resource. -- You have a `fly` target named `control-plane` with an existing pipeline called `foundation`. -- You have a source control repo that contains the `foundation` pipeline's `pipeline.yml`. - -You should be able to use the pipeline YAML in this document with any pipeline, -as long as you make sure the above names match up with what's in your pipeline, -either by changing the example YAML or your pipeline. - -## Download Upload And Stage Product to Ops Manager -For this guide, we're going to add the [TAS][tas] product. - -### Download -Before setting the pipeline, we will have to -create a config file for [`download-product`][download-product] -in order to download TAS from Tanzu Network. - -Create a `download-tas.yml`. - -{% include ".download-tas-tabs.md" %} - -Add and commit this file to the same directory as the previous guides. -This file should be accessible from the `configuration` resource. -```bash -git add download-tas.yml -git commit -m "Add download-tas file for foundation" -git push -``` - -Now that we have a config file, -we can add a new `download-upload-and-stage-tas` job in your `pipeline.yml`. 
- -```yaml hl_lines="3-32" -jobs: # Do not duplicate this if it already exists in your pipeline.yml, - # just add the following lines to the jobs section -- name: download-upload-and-stage-tas - serial: true - plan: - - aggregate: - - get: platform-automation-image - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - input_mapping: - config: config - params: - CONFIG_FILE: download-tas.yml - output_mapping: - downloaded-product: tas-product - downloaded-stemcell: tas-stemcell -``` - -Now that we have a runnable job, let's make a commit - -```bash -git add pipeline.yml -git commit -m 'download tas and its stemcell' -``` - -Then we can set the pipeline - -```bash -fly -t control-plane set-pipeline -p foundation -c pipeline.yml -``` - -If the pipeline sets without errors, run a `git push` of the config. - -!!! info "If fly set-pipeline returns an error" - Fix any and all errors until the pipeline can be set. - When the pipeline can be set properly, run - - ```bash - git add pipeline.yml - git commit --amend --no-edit - git push - ``` - -!!! note "Testing Your Pipeline" - We generally want to try things out right away to see if they're working right. - However, in this case, if you have a very slow internet connection and/or multiple Concourse workers, - you might want to hold off until we've got the job doing more, - so that if it works, you don't have to wait for the download again. 
- -### Upload and Stage -We have a product downloaded and (potentially) cached on a Concourse worker. -The next step is to upload and stage that product to Ops Manager. - -```yaml hl_lines="32-45" -jobs: -- name: download-upload-and-stage-tas - serial: true - plan: - - aggregate: - - get: platform-automation-image - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - input_mapping: - config: config - params: - CONFIG_FILE: download-tas.yml - output_mapping: - downloaded-product: tas-product - downloaded-stemcell: tas-stemcell - - task: upload-tas-stemcell - image: platform-automation-image - file: platform-automation-tasks/tasks/upload-stemcell.yml - input_mapping: - env: config - stemcell: tas-stemcell - params: - ENV_FILE: env.yml - - task: upload-and-stage-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/upload-and-stage-product.yml - input_mapping: - product: tas-product - env: config -``` - -Then we can re-set the pipeline - -```bash -fly -t control-plane set-pipeline -p foundation -c pipeline.yml -``` - -and if all is well, make a commit and push - -```bash -git add pipeline.yml -git commit -m 'upload tas and stemcell to Ops Manager' -git push -``` - -## Product Configuration -Before automating the configuration and install of the product, -we need a config file. -The simplest way is to choose your config options in the Ops Manager UI, -then pull its resulting configuration. - -!!! 
Info "Advanced Tile Config Option" - For an alternative that generates the configuration - from the product file, using ops files to select options, - see the [Config Template][config-template] section. - - -#### Pulling Configuration from Ops Manager -Configure the product _manually_ according to the product's install instructions. -This guide installs [tas][tas-install-vsphere]. -Other install instructions may be found in [VMware Tanzu Docs][tanzu-docs]. - -Once the product is fully configured, apply changes in the Ops Manager UI, -and then continue this guide. - -!!! warning "If You Do Not Apply Changes" - Ops Manager cannot generate credentials for you - until you have applied changes (at least once). - You can still go through this process without an initial applying changes, - but you will be unable to use `om staged-config` with `--include-credentials`, - and may have an incomplete configuration at the end of this process. - -[`om`][om] has a command called [staged-config][staged-config], -which is used to extract staged product -configuration from the Ops Manager UI. -`om` requires a `env.yml`, which we already used in the `upload-and-stage` task. - -Most products will contain the following top-level keys: - -- network-properties -- product-properties -- resource-config - -The command can be run directly using Docker. -We'll need to download the image to our local workstation, import it into Docker, -and then run `staged-config` for the [Tanzu Application Service][tas] product. -For more information on Running Commands Locally, -see the corresponding [How-to Guide][running-commands-locally]. - - -After the image has been downloaded from [Tanzu Network][tanzu-network-platform-automation] -we're going to need the product name recognized by Ops Manager. 
-This can be found using `om`, but first we should import the image - -```bash -export ENV_FILE=env.yml -docker import ${PLATFORM_AUTOMATION_IMAGE_TGZ} platform-automation-image -``` - -Then, we can run `om staged-products` to find the name of the product in Ops Manager. -```bash -docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ -om --env ${ENV_FILE} staged-products -``` - -The result should be a table that looks like the following -```text -+---------------------------+-----------------+ -| NAME | VERSION | -+---------------------------+-----------------+ -| cf | | -| p-bosh | | -+---------------------------+-----------------+ -``` - -`p-bosh` is the name of the director. -As `cf` is the only other product on our Ops Manager, -we can safely assume that this is the product name for [TAS][tas]. - -Using the product name `cf`, -let's extract the current configuration from Ops Manager. - -```bash -docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ -om --env ${ENV_FILE} staged-config --include-credentials --product-name cf > tas-config.yml -``` - -We have a configuration file for our tile ready to back up! Almost. -There are a few more steps required before we're ready to commit. - -#### Parameterizing the Config -Look through your `tas-config.yml` for any sensitive values. -These values should be `((parameterized))` -and saved off in a secrets store (in this example, we'll use Credhub). - -You should still be logged into Credhub. -If not, login. Be sure to note the space at the beginning of the line. -This will ensure your valuable secrets are not saved in terminal history. - -{% include ".logging-into-credhub.md" %} - -The example list of some sensitive values from our `tas-config.yml` are as follows, -note that this is intentionally incomplete. 
-```yaml -product-properties: - .properties.cloud_controller.encrypt_key: - value: - secret: my-super-secure-secret - .properties.networking_poe_ssl_certs: - value: - - certificate: - cert_pem: |- - -----BEGIN CERTIFICATE----- - my-cert - -----END CERTIFICATE----- - private_key_pem: |- - -----BEGIN RSA PRIVATE KEY----- - my-private-key - -----END RSA PRIVATE KEY----- - name: certificate -``` - -We'll start with the Cloud Controller encrypt key. -As this is a value that you might wish to rotate at some point, -we're going to store it off as a `password` type into Credhub. - -```bash -# note the starting space - credhub set \ - --name /concourse/your-team-name/cloud_controller_encrypt_key \ - --type password \ - --password my-super-secure-secret -``` - -To validate that we set this correctly, -we should run. - -```bash -# no need for an extra space -credhub get --name /concourse/your-team-name/cloud_controller_encrypt_key -``` - -and expect an output like -```text -id: -name: /concourse/your-team-name/cloud_controller_encrypt_key -type: password -value: my-super-secure-secret -version_created_at: "" -``` - -We are then going to store off the Networking POE certs -as a `certificate` type in Credhub. -But first, we're going to save off the certificate and private key -as plain text files to simplify this process. -We named these files `poe-cert.txt` and `poe-private-key.txt`. -There should be no formatting or indentation in these files, only new lines. 
- -```bash -# note the starting space - credhub set \ - --name /concourse/your-team-name/networking_poe_ssl_certs \ - --type rsa \ - --public poe-cert.txt \ - --private poe-private-key.txt -``` - -And again, we're going to validate that we set this correctly - -```bash -# no need for an extra space -credhub get --name /concourse/your-team-name/networking_poe_ssl_certs -``` - -and expect and output like - -```text -id: -name: /concourse/your-team-name/networking_poe_ssl_certs -type: rsa -value: - private_key: | - -----BEGIN RSA PRIVATE KEY----- - my-private-key - -----END RSA PRIVATE KEY----- - public_key: | - -----BEGIN CERTIFICATE----- - my-cert - -----END CERTIFICATE----- -version_created_at: "" -``` - -!!! warning "Remove Credentials from Disk" - Once we've validated that the certs are set correctly in Credhub, - remember to delete `poe-cert.txt` and `poe-private-key.txt` from your working directory. - This will prevent a potential security leak, - or an accidental commit of those credentials. - -Repeat this process for all sensitive values found in your `tas-config.yml`. - -Once completed, we can remove those secrets from `tas-config.yml` -and replace them with `((parameterized-values))`. -The parameterized value name should match the name in Credhub. -For our example, we parameterized the config like: - -```yaml -product-properties: - .properties.cloud_controller.encrypt_key: - value: - secret: ((cloud_controller_encrypt_key)) - .properties.networking_poe_ssl_certs: - value: - - certificate: - cert_pem: ((networking_poe_ssl_certs.public_key)) - private_key_pem: ((networking_poe_ssl_certs.private_key)) - name: certificate -``` - -Once your `tas-config.yml` is parameterized to your liking, -we can finally commit the config file. - -```bash -git add tas-config.yml -git commit -m "Add tas-config file for foundation" -git push -``` - -## Configure and Apply -With the hard part out of the way, -we can now configure the product and apply changes. 
- -First, we need to update the pipeline -to have a configure-product step. - -```yaml hl_lines="46-76" -jobs: -- name: download-upload-and-stage-tas - serial: true - plan: - - aggregate: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - input_mapping: - config: config - params: - CONFIG_FILE: download-tas.yml - output_mapping: - downloaded-product: tas-product - downloaded-stemcell: tas-stemcell - - task: upload-tas-stemcell - image: platform-automation-image - file: platform-automation-tasks/tasks/upload-stemcell.yml - input_mapping: - env: config - stemcell: tas-stemcell - params: - ENV_FILE: env/env.yml - - task: upload-and-stage-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/stage-product.yml - input_mapping: - product: tas-product - env: config -- name: configure-tas - serial: true - plan: - - aggregate: - - get: platform-automation-image - passed: [download-upload-and-stage-tas] - trigger: true - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - passed: [download-upload-and-stage-tas] - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: 
- CONFIG_PATHS: config - - task: configure-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/configure-product.yml - input_mapping: - config: config - env: config - params: - CONFIG_FILE: tas-config.yml -``` - -This new job will configure the TAS product -with the config file we previously created. - -Next, we need to add an apply-changes job -so that these changes will be applied by the Ops Manager. - -```yaml hl_lines="31-56" -- name: configure-tas - serial: true - plan: - - aggregate: - - get: platform-automation-image - trigger: true - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - passed: [download-upload-and-stage-tas] - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: configure-tas - image: platform-automation-image - file: platform-automation-tasks/tasks/configure-product.yml - input_mapping: - config: config - env: config - params: - CONFIG_FILE: tas-config.yml -- name: apply-changes - serial: true - plan: - - aggregate: - - get: platform-automation-image - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - passed: [configure-tas] - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: apply-changes - image: platform-automation-image - file: platform-automation-tasks/tasks/apply-changes.yml - input_mapping: - env: config -``` - -!!! 
info "Adding Multiple Products" - When adding multiple products, you can add the configure jobs as passed constraints - to the apply-changes job so that they all are applied at once. - Ops Manager will handle any inter-product dependency ordering. - This will speed up your apply changes - when compared with running an apply changes for each product separately. - - Example: - `passed: [configure-tas, configure-tas-windows, configure-healthwatch]` - - -Set the pipeline one final time, -run the job, and see it pass. - -```bash -fly -t control-plane set-pipeline -p foundation -c pipeline.yml -``` - -Commit the final changes to your repository. - -```bash -git add pipeline.yml -git commit -m "configure-tas and apply-changes" -git push -``` - -You have now successfully added a product to your automation pipeline. - -## Advanced Concepts -### Config Template -An alternative to the staged-config workflow -outlined in the how-to guide is `config-template`. - -`config-template` is an `om` command that creates a base config file with optional ops files -from a given tile or pivnet slug. - -This section will assume [TAS][tas], like the how-to guide above. - -#### Generate the Config Template Directory - -```bash -# note the leading space - export PIVNET_API_TOKEN='your-vmware-tanzu-network-api-token' -``` - -```bash -docker run -it -v $HOME/configs:/configs platform-automation-image \ -om config-template \ - --output-directory /configs/ \ - --pivnet-api-token "${PIVNET_API_TOKEN}" \ - --pivnet-product-slug elastic-runtime \ - --product-version '2.5.0' \ - --product-file-glob 'cf*.pivotal' # Only necessary if the product has multiple .pivotal files -``` - -This will create or update a directory at `$HOME/configs/cf/2.5.0/`. - -`cd` into the directory to get started creating your config. - -#### Interpolate a Config - -The directory will contain a `product.yml` file. -This is the template for the product configuration you're about to build. -Open it in an editor of your choice. 
-Get familiar with what's in there. -The values will be variables intended to be interpolated from other sources, -designated with the`(())` syntax. - -You can find the value for any property with a default in the `product-default-vars.yml` file. -This file serves as a good example of a variable source. -You'll need to create a vars file of your own for variables without default values. -For the base template, you can get a list of required variables by running -```bash -docker run -it -v $HOME/configs:/configs platform-automation-image \ -om interpolate \ - --config product.yml \ - -l product-default-vars.yml \ - -l resource-vars.yml \ - -l errand-vars.yml -``` - -Put all those vars in a file and give them the appropriate values. -Once you've included all the variables, -the output will be the finished template. -For the rest of this guide, -we will refer to these vars as `required-vars.yml`. - -There may be situations that call for splitting your vars across multiple files. -This can be useful if there are vars that need to be interpolated when you apply the configuration, -rather than when you create the final template. -You might consider creating a separate vars file for each of the following cases: - -- credentials (these vars can then be [persisted separately/securely][secrets-handling]) -- foundation-specific variables when using the same template for multiple foundations - -You can use the `--skip-missing` flag when creating your final template -using `om interpolate` to leave such vars to be rendered later. - -If you're having trouble figuring out what the values should be, -here are some approaches you can use: - -- Look in the template where the variable appears for some additional context of its value. -- Look at the tile's online documentation -- Upload the tile to an Ops Manager - and visit the tile in the Ops Manager UI to see if that provides any hints. 
- - If you are still struggling, inspecting the html of the Ops Manager webpage - can more accurately map the value names to the associated UI element. - -!!! info "When Using The Ops Manager Docs and UI" - Be aware that the field names in the UI do not necessarily map directly to property names. - -#### Optional Features -The above process will get you a default installation, -with no optional features or variables, -that is entirely deployed in a single Availability Zone (AZ). - -In order to provide non-required variables, -use multiple AZs, -or make non-default selections for some options, -you'll need to use some of the ops files in one of the following four directories: - - - - - - - - - - - - - - - - - - -
featuresallow the enabling of selectors for a product. For example, enabling/disabling of an s3 bucket
networkcontains options for enabling 2-3 availability zones for network configuration
optionalcontains optional properties without defaults. For optional values that can be provided more than once, there's an ops file for each param count
resourcecontains configuration that can be applied to resource configuration. For example, BOSH VM extensions
- -For more information on BOSH VM Extensions, refer to the [Creating a Director Config File How-to Guide][vm-extensions]. - -To use an ops file, add `-o` -with the path to the ops file you want to use to your `interpolate` command. - -So, to enable TCP routing in Tanzu Application Service, you would add `-o features/tcp_routing-enable.yml`. -For the rest of this guide, the vars for this feature -are referred to as `feature-vars.yml`. -If you run your complete command, you should again get a list of any newly-required variables. - -```bash -docker run -it -v $HOME/configs:/configs platform-automation-image \ -om interpolate \ - --config product.yml \ - -l product-default-vars.yml \ - -l resource-vars.yml \ - -l required-vars.yml \ - -o features/tcp_routing-enable.yml \ - -l feature-vars.yml \ - -l errand-vars.yml -``` - -#### Finalize Your Configuration - -Once you've selected your ops files and created your vars files, -decide which vars you want in the template -and which you want to have interpolated later. - -Create a final template and write it to a file, -using only the vars you want to in the template, -and using `--skip-missing` to allow the rest to remain as variables. - -```bash -docker run -it -v $HOME/configs:/configs platform-automation-image \ -om interpolate \ - --config product.yml \ - -l product-default-vars.yml \ - -l resource-vars.yml \ - -l required-vars.yml \ - -o features/tcp_routing-enable.yml \ - -l feature-vars.yml \ - -l errand-vars.yml \ - --skip-missing \ - > pas-config-template.yml -``` - -You can check-in the resulting configuration to a git repo. -For vars that do not include credentials, you can check those vars files in, as well. -Handle vars that are secret [more carefully][secrets-handling]. - -You can then dispose of the config template directory. 
- -## Using Ops Files for Multi-Foundation - -There are two recommended ways to support multiple foundation workflows: -using [secrets management][multi-foundation-secrets-handling] or ops files. -This section will explain how to support multiple foundations using ops files. - -Starting with an **incomplete** [Tanzu Application Service][tas] config from **vSphere** as an example: - -{% include ".cf-partial-config.md" %} - -For a single foundation deploy, leaving values such as -`".cloud_controller.apps_domain"` as-is would work fine. For multiple -foundations, this value will be different per deployed foundation. Other values, -such as `.cloud_controller.encrypt_key` have a secret that -already have a placeholder from `om`. If different foundations have different -load requirements, even the values in `resource-config` can be edited using -[ops files][ops-files]. - -Using the example above, let's try filling in the existing placeholder for -`cloud_controller.apps_domain` in our first foundation. -```yaml -# replace-domain-ops-file.yml -- type: replace - path: /product-properties/.cloud_controller.apps_domain/value? - value: unique.foundation.one.domain -``` - -To test that the ops file will work in your `base.yml`, this can be done locally using `bosh int`: -```bash - bosh int base.yml -o replace-domain.yml -``` - -This will output `base.yml` with the replaced(interpolated) values: - -{% include ".cf-partial-config-domain-interpolated.md" %} - -Anything that needs to be different per deployment can be replaced via ops files as long as the `path:` is correct. - -Upgrading products to new patch versions: - -* Configuration settings should not differ between successive patch versions within the same minor version line. - Underlying properties or property names may change, - but the tile's upgrade process automatically translates properties to the new fields and values. -* VMware cannot guarantee the functionality of upgrade scripts in third-party products. 
- -Replicating configuration settings from one product to the same product on a different foundation: - -* Because properties and property names can change between patch versions of a product, - you can only safely apply configuration settings across products if their versions exactly match. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/configuring-auth.html.md.erb b/docs/how-to-guides/configuring-auth.html.md.erb new file mode 100644 index 00000000..acf8f47a --- /dev/null +++ b/docs/how-to-guides/configuring-auth.html.md.erb @@ -0,0 +1,33 @@ +# Generating an Auth file + +The VMware Tanzu Operations Manager authentication system can be configured several ways. +The format of the configuration file varies +according to the authentication method to be used. + +## Configure authentication + +See [configure-authentication](../tasks.html#configure-authentication). + +<%= partial "examples/auth" %> + +## Configure LDAP authentication + +See [configure-ldap-authentication](../tasks.html#configure-ldap-authentication). + +<%= partial "examples/auth-ldap" %> + +## Configure SAML authentication + +See [configure-saml-authentication](../tasks.html#configure-saml-authentication). + +<%= partial "examples/auth-saml" %> + +## Managing configuration, auth, and state files + +To use all these files with the Concourse tasks that require them, +you need to make them available as Concourse resources. +They’re all text files. +There are many resource types that can work for this. +In our examples, we use a git repository. +As with the tasks and image, +you need to declare a resource in your pipeline for each repo you need. 
diff --git a/docs/how-to-guides/configuring-auth.md b/docs/how-to-guides/configuring-auth.md deleted file mode 100644 index 24d0c4a6..00000000 --- a/docs/how-to-guides/configuring-auth.md +++ /dev/null @@ -1,27 +0,0 @@ -## Generating an Auth File -Ops Manager's authentication system can be configured several ways. -The format of the configuration file varies -according to the authentication method to be used. - -### [configure-authentication][configure-authentication]: ----excerpt--- "examples/auth-configuration" - -### [configure-ldap-authentication][configure-ldap-authentication]: ----excerpt--- "examples/ldap-auth-configuration" - -### [configure-saml-authentication][configure-saml-authentication]: ----excerpt--- "examples/saml-auth-configuration" - -## Managing Configuration, Auth, and State Files -To use all these files with the Concourse tasks that require them, -you need to make them available as Concourse Resources. -They’re all text files. -There are many resource types that can work for this. -In our examples, we use a git repository. -As with the tasks and image, -you’ll need to declare a resource in your pipeline for each repo you need. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md"%} diff --git a/docs/how-to-guides/configuring-env.md b/docs/how-to-guides/configuring-env.html.md.erb similarity index 60% rename from docs/how-to-guides/configuring-env.md rename to docs/how-to-guides/configuring-env.html.md.erb index 6a9cbb86..44db8a43 100644 --- a/docs/how-to-guides/configuring-env.md +++ b/docs/how-to-guides/configuring-env.html.md.erb @@ -1,24 +1,25 @@ -## Generating an Env File -Almost all [`om`][om] commands require an env file -to describe how to communicate (and authenticate) with a given Ops Manager. 
+# Generating an Env file + +Almost all [`om` commands](https://github.com/pivotal-cf/om) require an env file +to describe how to communicate (and authenticate) with a given VMware Tanzu Operations Manager. There are two ways to provide auth information. + +## Username and password + If your configuration choices allow you to use `username` and `password` directly, you can do so: ----excerpt--- "examples/env" +<%= partial "../examples/env" %> + +## UAA client However, if you're using an external identity provider via SAML or LDAP integration, you'll need to use a UAA client via `client-id` and `client-secret`: ----excerpt--- "examples/env-uaa" +<%= partial "../examples/env-uaa" %> While `decryption-passphrase` is nominally optional, if you intend to use a single `env.yml` for an entire pipeline, it will be necessary to include for use with the `import-installation` step. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/creating-a-director-config-file.html.md.erb b/docs/how-to-guides/creating-a-director-config-file.html.md.erb new file mode 100644 index 00000000..9a630a5d --- /dev/null +++ b/docs/how-to-guides/creating-a-director-config-file.html.md.erb @@ -0,0 +1,161 @@ +# Configuring a Director config file + +A director config file is an externalized config that lives outside of VMware Tanzu Operations Manager. Extracting this file can make it easier to manage multiple foundations. It also helps with: + +- traceability +- avoiding configuration drift +- configuration promotion + + +## Prerequisites + +To extract the configuration for a director, you need a Tanzu Operations Manager VM. +For detailed instructions, see [Installing Tanzu Operations Manager](./installing-opsman.html). 
+
+## Extracting configuration
+
+In [om](https://github.com/pivotal-cf/om) there is a command called [staged-director-config](../tasks.html#staged-director-config), which is used to extract
+the Tanzu Operations Manager and the BOSH director configuration from the targeted foundation.
+
+<%= partial "../missing_fields_opsman_director" %>
+
+Sample usage:
+`om --env env.yml staged-director-config > director.yml`
+
+This gives you the whole configuration of Tanzu Operations Manager in a single YAML file.
+It will look more or less the same as the example below. You can check it
+in to your VCS.
+
+The following is an example configuration file for Tanzu Operations Manager that might be returned
+after running this command:
+
+<%= partial "../examples/director" %>
+
+## Configuring Director using config file
+
+Now you can modify the settings in the configuration file directly instead of
+operating in the Web UI. After you finish editing the file, the configuration
+file will need to be applied back to the Tanzu Operations Manager instance. The command
+[configure-director](../tasks.html#configure-director) will do the job.
+
+Sample usage:
+`om --env env.yml configure-director --config director.yml`
+
+
+## Promoting Tanzu Operations Manager to another foundation
+
+The configuration file is the exact state of a given foundation, and it contains
+some environment-specific properties. You need to edit these
+properties manually to reflect the state of the new foundation. Or, when extracting
+the configuration file from the foundation, you can use the flag
+`--include-placeholders`. It will help to parameterize some variables to
+ease the process of adapting the configuration for another foundation.
+
+## VM extensions
+
+You may specify custom VM extensions to be used in deployments.
+To learn more about how various IaaS's support and use these extensions,
+see the [BOSH docs](https://bosh.io/docs/cloud-config/#vm-extensions).
+
+Using VM Extensions for your director configuration
+is an advanced feature of Tanzu Operations Manager.
+Sometimes it is necessary to define these extensions
+to perform certain tasks on your Tanzu Operations Manager director,
+but they are not required to run a foundation,
+and they change the default behavior if they are defined.
+
+Use with caution.
+
+In the following example, two new VM extensions are defined
+and will be added to the list of available extensions on the next [`configure-director`](../tasks.html#configure-director).
+This can be added to the end of your existing `director.yml`,
+or defined independently and set with no other configurations present.
+
+There are no default VM Extensions on a deployed Tanzu Operations Manager.
+
+`director.yml` Example:
+
+```yaml
+vmextensions-configuration:
+- name: a_vm_extension
+  cloud_properties:
+    source_dest_check: false
+- name: another_vm_extension
+  cloud_properties:
+    foo: bar
+...
+```
+
+To use VM Extensions in either your director or product,
+define `additional_vm_extensions`:
+
+```yaml
+resource-configuration:
+  director:
+    additional_networks: []
+    additional_vm_extensions: [a_vm_extension,another_vm_extension]
+...
+```
+
+## VM types
+
+You may specify custom VM types to be used in deployments.
+To learn more about how various IaaS's support and use these types,
+[see the BOSH docs](https://bosh.io/docs/cloud-config/#vm-types).
+
+Using VM Types for your director configuration
+is an advanced feature of Tanzu Operations Manager.
+VM Types are not required to run a foundation,
+and they change the default behavior if they are defined.
+
+Use with caution.
+
+In the following example, two new VM types are defined
+and will be added to the list of available types on the next [`configure-director`](../tasks.html#configure-director).
+This can be added to the end of your existing `director.yml`,
+or defined independently and set with no other configurations present.
+ +`director.yml` Example: + +```yaml +vmtypes-configuration: + custom_only: false + vm_types: + - name: x1.large + cpu: 8 + ram: 8192 + ephemeral_disk: 10240 + - name: mycustomvmtype + cpu: 4 + ram: 16384 + ephemeral_disk: 4096 +... +``` + +### VM Types precedence + +The precedence rules are listed in the following table. + + + + + + + + + + + + + + + + + + + + + + +
1If custom_only is true, + the VM types specified in your configuration will replace the entire list of available VM types in the Tanzu Operations Manager.
2If the property is set to false or is omitted, configure_director will append the listed VM types to the list of default VM types for your IaaS.
3If a specified VM type is named the same as a predefined VM type, it will overwrite the predefined type.
4If multiple specified VM types have the same name, the one specified last will be created.
5Existing custom VM types do not persist across configure-director calls, and it should be expected that the entire list of custom VM types is specified in the director configuration.
diff --git a/docs/how-to-guides/creating-a-director-config-file.md b/docs/how-to-guides/creating-a-director-config-file.md deleted file mode 100644 index ff0aabce..00000000 --- a/docs/how-to-guides/creating-a-director-config-file.md +++ /dev/null @@ -1,135 +0,0 @@ -Extracting a director configuration file, an externalized config that lives outside of Ops Manager, can make it easier to manage multiple foundations as well as help with: - -- traceability -- avoiding configuration drift -- configuration promotion - - -## Prerequisites -To extract the configuration for a director, you will first need an Ops Manager vm. -For detailed instructions, follow the [Installing Ops Manager][install-how-to] how-to guide. - -## Extracting Configuration -[om][om] has a command called [staged-director-config][staged-director-config], which is used to extract -the Ops Manager and the BOSH director configuration from the targeted foundation. - -{% include ".missing_fields_opsman_director.md" %} - -Sample usage: -`om --env env.yml staged-director-config > director.yml` -will give you the whole configuration of Ops Manager in a single yml file. -It will look more or less the same as the example above. You can check it -in to your VCS. - -The following is an example configuration file for Ops Manager that might return -after running this command: ----excerpt--- "examples/director-configuration" - -## Configuring Director Using Config File -Now you can modify the settings in the configuration file directly instead of -operating in the web ui. After you finish editing the file, the configuration -file will need to apply back to the Ops Manager instance. The command -[configure-director][configure-director] will do the job. - -Sample usage: -`om --env env.yml configure-director --config director.yml` - - -## Promoting Ops Manager to Another Foundation -The configuration file is the exact state of a given foundation, it contains -some environment specific properties. 
You need to manually edit those -properties to reflect the state of the new foundation. Or, when extracting -the configuration file from the foundation, you can use the flag -`--include-placeholders`, it will help to parameterize some variables to -ease the process of adapt for another foundation. - -## VM Extensions -You may specify custom VM extensions to be used in deployments. -To learn more about how various IAAS's support and use these extensions, -[see the Bosh docs][bosh-vm-extensions]. - -Using VM Extensions for your director configuration -is an _advanced feature_ of Ops Manager. -Sometimes it is necessary to define these extensions -in order to perform certain tasks on your Ops Manager director, -but they are not required to run a foundation(s), -and will change default behavior if defined. - -Use at your own discretion. - -In the following example, two new VM extensions are defined -and will be added to the list of available extensions on the next [`configure-director`][configure-director]. -This can be added to the end of your existing `director.yml`, -or defined independently and set with no other configurations present. - -There are no default VM Extensions on a deployed Ops Manager. - -`director.yml` Example: -```yaml -vmextensions-configuration: -- name: a_vm_extension - cloud_properties: - source_dest_check: false -- name: another_vm_extension - cloud_properties: - foo: bar -... -``` - -To use VM Extensions in either your director or product, -define `additional_vm_extensions` like so: -```yaml -resource-configuration: - director: - additional_networks: [] - additional_vm_extensions: [a_vm_extension,another_vm_extension] -... -``` - -## VM Types -You may specify custom VM types to be used in deployments. -To learn more about how various IAAS's support and use these types, -[see the Bosh docs][bosh-vm-types]. - -Using VM Types for your director configuration -is an _advanced feature_ of Ops Manager. 
-VM Types are not required to run a foundation(s), -and will change default behavior if defined. - -Use at your own discretion. - -In the following example, two new VM types are defined -and will be added to the list of available types on the next [`configure-director`][configure-director]. -This can be added to the end of your existing `director.yml`, -or defined independently and set with no other configurations present. - -`director.yml` Example: -```yaml -vmtypes-configuration: - custom_only: false - vm_types: - - name: x1.large - cpu: 8 - ram: 8192 - ephemeral_disk: 10240 - - name: mycustomvmtype - cpu: 4 - ram: 16384 - ephemeral_disk: 4096 -... -``` - -!!! note "Precedence" - - If `custom_only` is `true`, - the VM types specified in your configuration will replace the entire list of available VM types in the Ops Manager. - - If the property is set to false or is omitted, - `configure_director` will append the listed VM types to the list of default VM types for your IaaS. - - If a specified VM type is named the same as a predefined VM type, it will overwrite the predefined type. - - If multiple specified VM types have the same name, the one specified last will be created. - - Existing custom VM types do not persist across configure-director calls, - and it should be expected that the entire list of custom VM types is specified in the director configuration. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/git-repo-layout.html.md.erb b/docs/how-to-guides/git-repo-layout.html.md.erb new file mode 100644 index 00000000..3b6a4dc2 --- /dev/null +++ b/docs/how-to-guides/git-repo-layout.html.md.erb @@ -0,0 +1,166 @@ +# Why use Git and GitHub? + +GitHub is a system that provides Git remotes, +essentially, an internet accessible backup to the git repositories on your computer. +Using a remote enables a pipeline +to access and update the state and configuration files. 
+ +Git is a commonly used version control tool. +It can be used to track code changes made to files in a repository (or "repo"). +Changes can then be "pushed" to or "pulled" from remote copies of that repository. + +

+There are many alternatives to GitHub including +GitLab, Google Cloud Source Repositories, and so on. +Any remote Git client will work with Platform Automation Toolkit and Concourse. +See the Concourse Git resource +documentation for details.

+
+To learn more about Git and GitHub,
+read this short [git handbook](https://docs.github.com/en/get-started/using-git/about-git).
+
+## Creating a Git repository
+
+To create a new, local Git repo:
+
+```bash
+# start a new repository
+git init my-platform-automation
+
+# navigate to the new directory it creates
+cd my-platform-automation
+
+# create a new directory for your config
+mkdir config
+
+# create remaining directories
+mkdir env state vars
+
+# create config files for director and opsman
+touch config/director.yml
+touch config/opsman.yml
+
+# create env file
+touch env/env.yml
+
+# create state file
+touch state/state.yml
+
+# Optional:
+# create vars files for parameters corresponding to configs
+touch vars/director-vars.yml
+touch vars/opsman-vars.yml
+
+# stage and commit the files to the repository
+git add . && git commit -m "add initial files"
+```
+
+## Creating a GitHub Repository
+
+Go to GitHub and create a new remote repository.
+
+1. Under your profile, select **Repositories**.
+1. Select **New**.
+1. Name your new repository and follow the prompts.
+1. When prompted, do not add any default files.
+1. Copy the URL of your new GitHub repository.
+1. Set the local Git repo's remote to the new GitHub repo:
+
+    ```bash
+    # enter the path for the new GitHub repo
+    git remote add origin https://github.com/YOUR-USERNAME/YOUR-REPOSITORY.git
+
+    # push your changes to the default branch
+    git push --set-upstream origin main
+    ```
+
+You should now see your GitHub repo populated
+with the directories and empty files.
+

+A GitHub repository may be referenced +as a remote repo by HTTPS or by SSH. +In general, SSH keys are more secure. +The Concourse Git resource +supports using SSH keys to pull from a repository. +For more information on using SSH keys with GitHub, +see the SSH documentation.

+ +## Recommended file structure + +You now have both a local Git repo and a remote on GitHub, +including the recommended structure +for a Platform Automation Toolkit configuration repo: + +```tree +├── my-platform-automation +│   ├── config +│   ├── env +│   ├── state +│   └── vars +``` + + + + + + + + + + + + + + + + + + + + + + +
Folder nameContents
config + Holds config files for the products installed on your foundation. + If using CredHub and/or vars files, + these config files should contain your ((parametrized)) values. +
env + Holds env.yml, + the environment file used by tasks that interact with Tanzu Operations Manager. +
vars + Holds product-specific vars files. + The fields in these files are used to fill in + ((parameters)) during interpolation steps. +
state + Holds state.yml, + which contains the VM ID for the Tanzu Operations Manager VM. +
+ +For further details about the contents of these files, +see [Inputs and outputs](../inputs-outputs.html). + +

+Never commit secrets to Git. +It is a best practice not to commit secrets, +including passwords, keys, and sensitive information, +to Git or GitHub. Instead, use ((parameters)). +For more information about a recommended way to do this +using CredHub or vars files +see Using a secrets store to store credentials.

+ +## Multi-foundation structure + +The setup described in this topic is just one example of how to structure your configuration repository. +You may instead decide to have a repo for just config files and separate repos +just for vars files. This decouples the config parameter names from their values +for multi-foundation templating. + +There are many possibilities for structuring Git repos in complex situations. +For guidance on how to best set up your git's file structure, +see [Inputs and outputs](../inputs-outputs.html). +Take note of the `inputs` and `outputs` of the +various [Platform Automation Toolkit tasks](../tasks.html). +As long as the various input / output mappings correctly correlate +to the expected inputs and outputs of the Platform Automation Toolkit tasks, +any file structure could theoretically work. diff --git a/docs/how-to-guides/git-repo-layout.md b/docs/how-to-guides/git-repo-layout.md deleted file mode 100644 index 2ee094ec..00000000 --- a/docs/how-to-guides/git-repo-layout.md +++ /dev/null @@ -1,167 +0,0 @@ -# Why use Git and GitHub? - -GitHub is a system that provides Git remotes, -essentially, an internet accessible backup to the git repositories on your computer. -Using a remote will enable a pipeline -to access and update the state and configuration files. - -Git is a commonly used version control tool. -It can be used to track code changes made to files within a repository (or "repo"). -Changes can then be "pushed" to or "pulled" from remote copies of that repository. - -!!! Info "GitHub alternatives" - There are many alternatives to GitHub including - Gitlabs, Google Cloud Source Repositories, etc. - Any remote Git client will work with Platform Automation Toolkit and Concourse. - Refer to the [Concourse Git resource][concourse-git-resource] documentation for details. - -To learn more about Git and Github, -you can [read this short git handbook][github-git-handbook]. 
- -## Creating a Git Repository - -To create a new, local Git repo: - -```bash -# start a new repository -git init my-platform-automation - -# navigate to the new directory it creates -cd my-platform-automation - -# create a new directory for your config -mkdir config - -# create remaining directories -mkdir env state vars - -# create config files for director and opsman -touch config/director.yml -touch config/opsman.yml - -# create env file -touch env/env.yml - -# create state file -touch state/state.yml - -# Optional: -# create vars files for parameters corresponding to configs -touch vars/director-vars.yml -touch vars/opsman-vars.yml - -# commit the file to the repository -git commit -m "add initial files" -``` - -## Creating a GitHub Repository - -Next, navigate to GitHub and create a new remote repository. - -1. Under your profile, select "Repositories" -1. Select "New" -1. Name your new repository and follow the prompts -1. Do not select to add any default files when prompted -1. Copy the URL of your new GitHub repository - -Now, we can set the local Git repo's -remote to the new GitHub repo: - -```bash -# enter the path for the new GitHub repo -git remote add origin https://github.com/YOUR-USERNAME/YOUR-REPOSITORY.git - -# push your changes to the default branch -git push --set-upstream origin main -``` - -You should now see your GitHub repo populated -with the directories and empty files. - -!!! tip "Using GitHub with SSH" - A GitHub repository may be referenced - as a remote repo by HTTPS or by SSH. - In general, SSH keys are more secure. - The [Concourse Git resource][concourse-git-resource] - supports using SSH keys to pull from a repository. - For more information on using SSH keys with GitHub, - refer to this [SSH documentation.][github-ssh] - -## Recommended File Structure - -You now have both a local Git repo and a remote on GitHub. 
-The above commands give you the recommended structure -for a Platform Automation Toolkit configuration repo: - -```tree -├── my-platform-automation -│   ├── config -│   ├── env -│   ├── state -│   └── vars -``` - - - - - - - - - - - - - - - - - - -
config - Holds config files for the products installed on your foundation. - If using Credhub and/or vars files, - these config files should have your ((parametrized)) values present in them -
env - Holds env.yml, - the environment file used by tasks that interact with Ops Manager. -
vars - Holds product-specific vars files. - The fields in these files get used to fill in - ((parameters)) during interpolation steps. -
state - Holds state.yml, - which contains the VM ID for the Ops Manager VM. -
- -For further details regarding the contents of these files, -please refer to the [Inputs and Outputs][inputs-outputs] documentation. - -!!! warning "Never commit secrets to Git" - It is a best practice to **_not_** commit secrets, - including passwords, keys, and sensitive information, - to Git or GitHub. Instead, use `((parameters))`. - For more information on a recommended way to do this, - using Credhub or vars files, - review the [handling secrets documentation.][secrets-handling] - -## Multi-foundation - -The above is just one example of how to structure your configuration repository. -You may instead decide to have a repo for just config files and separate repos -just for vars files. This decouples the config parameter names from their values -for multi-foundation templating. - -There are many possibilities for structuring Git repos in these complex situations. -For guidance on how to best set up your git's file structure, -refer to the [Inputs and Outputs][inputs-outputs] documentation -and take note of the `inputs` and `outputs` of the -various [Platform Automation Toolkit tasks][task-reference]. -As long as the various input / output mappings correctly correlate -to the expected ins and outs of the Platform Automation Toolkit tasks, -any file structure could theoretically work. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/installing-opsman.html.md.erb b/docs/how-to-guides/installing-opsman.html.md.erb new file mode 100644 index 00000000..219273f0 --- /dev/null +++ b/docs/how-to-guides/installing-opsman.html.md.erb @@ -0,0 +1,635 @@ +# Writing a pipeline to install Tanzu Operations Manager + +This how-to-guide shows you how to write a pipeline for installing a new VMware Tanzu Operations Manager. +If you already have a Tanzu Operations Manager VM, see [Upgrading an existing Tanzu Operations Manager](./upgrade-existing-opsman.html). 
+ +<%= partial "getting-started" %> + +## Downloading Tanzu Operations Manager + +1. First, switch out the test job +for one that downloads and installs Tanzu Operations Manager. +Do this by changing: + + - the `name` of the job + - the `name` of the task + - the `file` of the task + + The first task in the job should be [`download-product`](../tasks.html#download-product). + It has an additional required input; + the `config` file `download-product` uses to talk to the Broadcom Support portal. + +1. Before writing that file and making it available as a resource, +`get` it (and reference it in the params) +as if it's there. + + It also has an additional output (the downloaded image). + It will be used in a subsequent step, + so you don't have to `put` it anywhere. + +1. Finally, while it's fine for `test` to run in parallel, +the install process shouldn't, so +you also need to add `serial: true` to the job. + + ```yaml hl_lines="2 3 15-20" + jobs: + - name: install-ops-manager + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: download-product + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-ops-manager.yml + ``` + + 1. If you try to `fly` this up to Concourse, + it will again throw errors about resources that don't exist, + so the next step is to make them. + The first new resource you need is the config file. + 2. Push your git repo to a remote on GitHub + to make this (and later, other) configuration available to the pipelines. + GitHub has good [instructions](https://docs.github.com/en/migrations/importing-source-code/using-the-command-line-to-import-source-code/adding-locally-hosted-code-to-github) + you can follow to create a new repository on GitHub. 
+ You can skip over the part + about using `git init` to set up your repo, + since you did that earlier. + +1. Now set up your remote +and use `git push` to make it available. +You will use this repository to hold our single foundation specific configuration. +These instructions use the ["Single repository for each Foundation"](../pipeline-design/configuration-management-strategies.html#single-foundation-pattern) +pattern to structure the configurations. + +1. You must add the repository URL to CredHub so that you can reference it +later when you declare the corresponding resource. + + ```bash + # note the starting space throughout + credhub set \ + -n /concourse/your_team_name/foundation/pipeline-repo \ + -t value -v git@github.com:username/your-repo-name + ``` + + `download-ops-manager.yml` holds creds for communicating with the Broadcom Support portal, + and uniquely identifies a Tanzu Operations Manager image to download. + + An example `download-ops-manager.yml` is shown below. + +1. Create a `download-ops-manager.yml` for the IaaS you are using. + + <%= partial "opsman-config-tabs" %> + +1. Add and commit the new file: + + ```bash + git add download-ops-manager.yml + git commit -m "Add download-ops-manager file for foundation" + git push + ``` + + Now that the download-ops-manager file you need is in git, + you need to add a resource to tell Concourse how to get it as `config`. + +1. Since this is (probably) a private repo, +you need to create a deploy key Concourse can use to access it. +Follow the [GitHub instructions](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/managing-deploy-keys#deploy-keys) +for creating a deploy key. + +2. 
Then, put the private key in CredHub so you can use it in your pipeline:
+
+    ```bash
+    # note the space at the beginning of the next line
+     credhub set \
+      --name /concourse/your-team-name/plat-auto-pipes-deploy-key \
+      --type ssh \
+      --private the/filepath/of/the/key-id_rsa \
+      --public the/filepath/of/the/key-id_rsa.pub
+    ```
+
+1. Add this to the resources section of your pipeline file:
+
+    ```yaml
+    - name: config
+      type: git
+      source:
+        uri: ((pipeline-repo))
+        private_key: ((plat-auto-pipes-deploy-key.private_key))
+        branch: main
+    ```
+
+1. Now place the Broadcom Support token in CredHub:
+
+    ```bash
+    # note the starting space throughout
+     credhub set \
+      -n /concourse/your_team_name/foundation/pivnet_token \
+      -t value -v your-pivnet-token
+    ```
+
+    <%= partial './paths-and-pipeline-names' %>
+
+1. To perform interpolation in one of your input files,
+use the [`prepare-tasks-with-secrets` task](../tasks.html#prepare-tasks-with-secrets).
+In earlier steps, you relied on Concourse's native integration with CredHub for interpolation.
+That worked because you needed to use the variable
+in the pipeline itself, not in one of your inputs. 
+ + You can add it to your job + after you have retrieved the `download-ops-manager.yml` input, + but before the `download-product` task: + + ```yaml hl_lines="16-24" + jobs: + - name: install-ops-manager + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-product + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-ops-manager.yml + ``` + + Notice the [input mappings](https://concourse-ci.org/jobs.html#schema.step.task-step.input_mapping) + of the `prepare-tasks-with-secrets` task. + This allows us to use the output of one task + as in input of another. + + An alternative to `input_mappings` is discussed in + [Configuration Management Strategies](../pipeline-design/configuration-management-strategies.html#advanced-pipeline-design). + +1. Now, the `prepare-tasks-with-secrets` task +will find required credentials in the config files, +and modify the tasks, +so they will pull values from Concourse's integration of CredHub. + + The job will download the product now. + This is a good commit point. + + ```bash + git add pipeline.yml + git commit -m 'download the Ops Manager image' + git push + ``` + +## Creating resources for your Tanzu Operations Manager + +Before Platform Automation Toolkit can create a VM for your Tanzu Operations Manager installation, +there are certain resources required by the VM creation and Tanzu Operations Manager director installation processes. 
+These resources are created directly on the IaaS of your choice, +and read in as configuration for your Tanzu Operations Manager. + +There are two main ways of creating these resources. +Use the method that is right for you and your setup. + +### Terraform + +These are open source terraforming files +recommended for use because they are maintained by VMware. +These files are found in the open source [`paving`](https://github.com/pivotal/paving) repo on GitHub. + +What follows is the recommended way to get these resources set up. +The output can be used directly in subsequent steps as property configuration. + +The `paving` repo provides instructions for use in the `README` file. +Any manual variables that you need to fill out +are located in a [terraform.tfvars](https://developer.hashicorp.com/terraform/language/values/variables) file, +in the folder for the IaaS you are using. For more specific instructions, see the `README` for that IaaS. + +If there are specific aspects of the `paving` repo that does not work for you, +you can override some of the properties using an [override.tf](https://developer.hashicorp.com/terraform/language/files/override) file. + +Follow these steps to use the `paving` repository: + +1. Clone the repo on the command line: + + ```bash + cd ../ + git clone https://github.com/pivotal/paving.git + ``` + +2. In the checked out repository there are directories for each IaaS. + Copy the terraform templates for the infrastructure of your choice + to a new directory outside of the paving repo, so you can modify it: + + ```bash + # cp -Ra paving/${IAAS} paving-${IAAS} + mkdir paving-${IAAS} + cp -a paving/$IAAS/. paving-$IAAS + cd paving-${IAAS} + ``` + + `IAAS` must be set to match one of the infrastructure directories + at the top level of the `paving` repo; for example, + `aws`, `azure`, `gcp`, or `nsxt`. + +3. In the new directory, the `terraform.tfvars.example` file + shows what values are required for that IaaS. 
+ Remove the `.example` from the filename, + and replace the examples with real values. + +4. Initialize Terraform which will download the required IaaS providers. + + ```bash + terraform init + ``` + +5. Run `terraform refresh` to update the state with what currently exists on the IaaS. + + ```bash + terraform refresh \ + -var-file=terraform.tfvars + ``` + +6. Next, you can run `terraform plan` + to see what changes will be made to the infrastructure on the IaaS. + + ```bash + terraform plan \ + -out=terraform.tfplan \ + -var-file=terraform.tfvars + ``` + +7. Finally, you can run `terraform apply` + to create the required infrastructure on the IaaS. + + ```bash + terraform apply \ + -parallelism=5 \ + terraform.tfplan + ``` + +8. Save the output from `terraform output stable_config` + into a `vars.yml` file in `your-repo-name` for future use: + + ```bash + terraform output stable_config > ../your-repo-name/vars.yml + ``` + +9. Return to your working directory for the post-terraform steps: + + ```bash + cd ../your-repo-name + ``` + +10. Commit and push the updated `vars.yml` file: + + ```bash + git add vars.yml + git commit -m "Update vars.yml with terraform output" + git push + ``` + +### Manual installation + +VMware has extensive documentation to manually create the resources needed +if you are unable or do not wish to use Terraform. +As with the Terraform solution, however, +there are different docs depending on the IaaS +you are installing Tanzu Operations Manager onto. + +When going through the documentation required for your IaaS, +be sure to stop before deploying the Tanzu Operations Manager image. +Platform Automation Toolkit will do this for you. 
+ +- [AWS](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-aws.html) +- [Azure](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-azure.html) +- [GCP](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-gcp.html) +- [OpenStack](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-openstack.html) +- [vSphere](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-vsphere.html) + +

+If you need to install an earlier version of Tanzu Operations Manager, +select your desired version from the version selector at the top of the page.

+ +## Creating the Tanzu Operations Manager VM + +1. Now that you have a Tanzu Operations Manager image and the resources required to deploy a VM, +you can add the new task to the `install-opsman` job. + + ```yaml hl_lines="29-31" + jobs: + - name: install-ops-manager + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: prepare-tasks-with-secrets + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-product + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-ops-manager.yml + - task: create-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/create-vm.yml + ``` + +1. If you try to `fly` this up to Concourse, it will again complain +about resources that don't exist, so it's time to make them. +Two new inputs need to be added for `create-vm`: + + * `config` + * `state` + + The optional inputs are vars used with the config, so you will add those when you do the `config`. + + 1. For the config file, write a Tanzu Operations Manager VM Configuration file to `opsman.yml`. + + The properties available vary by IaaS, for example: + + * IaaS credentials + * networking setup (IP address, subnet, security group, etc) + * SSH key + * datacenter/availability zone/region + + Continue with the next section for completing the `opsman.yml` file. 
+ +### Terraform outputs + +If you used the `paving` repository from the [Creating resources for your Tanzu Operations Manager](./installing-opsman.html#creating-resources-for-your-tanzu-operations-manager) section, +the following steps will result in a filled out `opsman.yml`. + +Tanzu Operations Manager must be deployed with the IaaS-specific configuration. + +1. Copy and paste the relevant YAML below for your IaaS, + and save the file as `opsman.yml`. + + **AWS** + + ```yaml + --8<-- "external/paving/ci/configuration/aws/ops-manager.yml" + ``` + + **Azure** + + ```yaml + --8<-- "external/paving/ci/configuration/azure/ops-manager.yml" + ``` + + **GCP** + + ```yaml + --8<-- "external/paving/ci/configuration/gcp/ops-manager.yml" + ``` + + **vSphere+NSXT** + + ```yaml + --8<-- "external/paving/ci/configuration/nsxt/ops-manager.yml" + ``` + + Where: + + * The `((parameters))` in these examples map to outputs from the `terraform-outputs.yml`, + which can be provided via vars file for YAML interpolation in a subsequent step. + +

+For a supported IaaS not listed above, +see the Operations Manager config. +

+ +### Manual configuration + +If you created your infrastructure manually +or would like additional configuration options, +these are the acceptable keys for the `opsman.yml` file for each IaaS. + +

AWS

+ +<%= partial "../examples/opsman-config/aws1" %> + +

Azure

+ +<%= partial "../examples/opsman-config/azure1" %> + +

GCP

+ +<%= partial "../examples/opsman-config/gcp1" %> + +

OpenStack

+ +<%= partial "../examples/opsman-config/openstack1" %> + +

vSphere

+ +<%= partial "../examples/opsman-config/vsphere1" %> + +### Using the Tanzu Operations Manager config file + +1. After you have your config file, commit and push it: + + ```bash + git add opsman.yml + git commit -m "Add opsman config" + git push + ``` + + The `state` input is a placeholder + which will be filled in by the `create-vm` task output. + This will be used later to keep track of the VM so it can be upgraded, + which you can learn about in the [upgrade-how-to](./upgrade-existing-opsman.html). + +1. Add the following to your `resources` section of your `pipeline.yml`. + + ```yaml + - name: vars + type: git + source: + uri: ((pipeline-repo)) + private_key: ((plat-auto-pipes-deploy-key.private_key)) + branch: main + ``` + + This resource definition will allow `create-vm` + to use the variables from `vars.yml` + in the `opsman.yml` file. + +1. Update the `create-vm` task in the `install-opsman` to +use the `download-product` image, +Tanzu Operations Manager configuration file, the +variables file, +and the placeholder state file. 
+ + ```yaml hl_lines="33-37" + jobs: + - name: install-ops-manager + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - get: vars + - task: prepare-tasks-with-secrets + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-product + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-ops-manager.yml + - task: create-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/create-vm.yml + params: + VARS_FILES: vars/vars.yml + input_mapping: + state: config + image: downloaded-product + ``` + +

+    Defaults for tasks:
+    We do not explicitly set the default parameters
+    for create-vm in this example.
+    Because opsman.yml is the default input to
+    OPSMAN_CONFIG_FILE, it is redundant
+    to set this param in the pipeline.
+    See the Task reference for
+    available and default parameters.

+ +1. Now set the pipeline. + + Before you run the job, + [`ensure`](https://concourse-ci.org/jobs.html#schema.step.ensure) that `state.yml` is always persisted + regardless of whether the `install-opsman` job failed or passed. + To do this, you can add the following section to the job: + + ```yaml hl_lines="37-56" + jobs: + - name: install-ops-manager + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: config + - task: prepare-tasks-with-secrets + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-product + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-ops-manager.yml + - task: create-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/create-vm.yml + params: + VARS_FILES: vars/vars.yml + input_mapping: + state: config + image: downloaded-product + ensure: + do: + - task: make-commit + image: platform-automation-image + file: platform-automation-tasks/tasks/make-git-commit.yml + input_mapping: + repository: config + file-source: generated-state + output_mapping: + repository-commit: config-commit + params: + FILE_SOURCE_PATH: state.yml + FILE_DESTINATION_PATH: state.yml + GIT_AUTHOR_EMAIL: "pcf-pipeline-bot@example.com" + GIT_AUTHOR_NAME: "Platform Automation Toolkit Bot" + COMMIT_MESSAGE: 'Update state file' + - put: config + params: + repository: config-commit + merge: true + ``` + +1. Set the pipeline one final time, +run the job, and see it pass. + + ```bash + fly -t control-plane set-pipeline \ + -p foundation \ + -c pipeline.yml + ``` + +1. 
Commit the final changes to your repository. + + ```bash + git add pipeline.yml + git commit -m "Install Ops Manager in CI" + git push + ``` + +Your install pipeline is now complete. diff --git a/docs/how-to-guides/installing-opsman.md b/docs/how-to-guides/installing-opsman.md deleted file mode 100644 index a68cd1dd..00000000 --- a/docs/how-to-guides/installing-opsman.md +++ /dev/null @@ -1,638 +0,0 @@ -# Writing a Pipeline to Install Ops Manager -This how-to-guide shows you how to write a pipeline for installing a new Ops Manager. -If you already have an Ops Manager VM, check out [Upgrading an Existing Ops Manager][upgrade-how-to]. - -{% include ".getting-started.md" %} - -### Downloading Ops Manager - -We're finally in a position to do work! - -Let's switch out the test job -for one that downloads and installs Ops Manager. -We can do this by changing: - -- the `name` of the job -- the `name` of the task -- the `file` of the task - -Our first task within the job should be [`download-product`][download-product]. -It has an additional required input; -we need the `config` file `download-product` uses to talk to Tanzu Network. - -We'll write that file and make it available as a resource in a moment, -for now, we'll just `get` it -(and reference it in our params) -as if it's there. - -It also has an additional output (the downloaded image). -We're just going to use it in a subsequent step, -so we don't have to `put` it anywhere. - -Finally, while it's fine for `test` to run in parallel, -the install process shouldn't. -So, we'll add `serial: true` to the job, too. 
- -```yaml hl_lines="2 3 15-20" -jobs: -- name: install-ops-manager - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: download-product - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-ops-manager.yml -``` - -If we try to `fly` this up to Concourse, -it will again complain about resources that don't exist. - -So, let's make them. - -The first new resource we need is the config file. -We'll push our git repo to a remote on Github -to make this (and later, other) configuration available to the pipelines. - -Github has good [instructions][git-add-existing] -you can follow to create a new repository on Github. -You can skip over the part -about using `git init` to setup your repo, -since we [already did that](#but-first-git-init). - -Go ahead and setup your remote -and use `git push` to make what we have available. -We will use this repository to hold our single foundation specific configuration. -We are using the ["Single Repository for Each Foundation"][single-foundation-pattern] -pattern to structure our configurations. - -You will also need to add the repository URL -to Credhub so we can reference it -later when we declare the corresponding resource. - -```bash -# note the starting space throughout - credhub set \ - -n /concourse/your_team_name/foundation/pipeline-repo \ - -t value -v git@github.com:username/your-repo-name -``` - -`download-ops-manager.yml` holds creds for communicating with Tanzu Network, -and uniquely identifies an Ops Manager image to download. - -An example `download-ops-manager.yml` is shown below. - -Create a `download-ops-manager.yml` for the IaaS you are using. 
- -{% include ".opsman-config-tabs.md" %} - -Add and commit the new file: - -```bash -git add download-ops-manager.yml -git commit -m "Add download-ops-manager file for foundation" -git push -``` - -Now that the download-ops-manager file we need is in git, -we need to add a resource to tell Concourse how to get it as `config`. - -Since this is (probably) a private repo, -we'll need to create a deploy key Concourse can use to access it. -Follow [Github's instructions][git-deploy-keys] -for creating a deploy key. - -Then, put the private key in Credhub so we can use it in our pipeline: - -```bash -# note the space at the beginning of the next line - credhub set \ - --name /concourse/your-team-name/plat-auto-pipes-deploy-key \ - --type ssh \ - --private the/filepath/of/the/key-id_rsa \ - --public the/filepath/of/the/key-id_rsa.pub -``` - -Then, add this to the resources section of your pipeline file: - -```yaml -- name: config - type: git - source: - uri: ((pipeline-repo)) - private_key: ((plat-auto-pipes-deploy-key.private_key)) - branch: main -``` - -We'll need to put the Tanzu Network token in Credhub: - -```bash -# note the starting space throughout - credhub set \ - -n /concourse/your_team_name/foundation/pivnet_token \ - -t value -v your-pivnet-token -``` - -{% include './.paths-and-pipeline-names.md' %} - -In order to perform interpolation in one of our input files, -we'll need the [`prepare-tasks-with-secrets` task][prepare-tasks-with-secrets] -Earlier, we relied on Concourse's native integration with Credhub for interpolation. -That worked because we needed to use the variable -in the pipeline itself, not in one of our inputs. 
- -We can add it to our job -after we've retrieved our `download-ops-manager.yml` input, -but before the `download-product` task: - -```yaml hl_lines="16-24" -jobs: -- name: install-ops-manager - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-product - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-ops-manager.yml -``` - -Notice the [input mappings][concourse-input-mapping] -of the `prepare-tasks-with-secrets` task. -This allows us to use the output of one task -as in input of another. - -An alternative to `input_mappings` is discussed in -[Configuration Management Strategies][advanced-pipeline-design]. - -Now, the `prepare-tasks-with-secrets` task -will find required credentials in the config files, -and modify the tasks, -so they will pull values from Concourse's integration of Credhub. - -The job will download the product now. -This is a good commit point. - -```bash -git add pipeline.yml -git commit -m 'download the Ops Manager image' -git push -``` - -### Creating Resources for Your Ops Manager - -Before Platform Automation Toolkit can create a VM for your Ops Manager installation, -there are certain resources required by the VM creation and Ops Manager director installation processes. -These resources are created directly on the IaaS of your choice, -and read in as configuration for your Ops Manager. 
- -There are two main ways of creating these resources, -and you should use whichever method is right for you and your setup. - -**Terraform**: - -These are open source terraforming files -we recommend for use, as they are maintained by VMware. -These files are found in the open source [`paving`][paving] repo on GitHub. - -This is the recommended way to get these resources set up -as the output can directly be used in subsequent steps as property configuration. - -The `paving` repo provides instructions for use in the `README`. -Any manual variables that you need to fill out -will be in a [terraform.tfvars][terraform-vars] file -in the folder for the IaaS you are using -(for more specific instruction, please consult the `README` for that IaaS). - -If there are specific aspects of the `paving` repo that does not work for you, -you can override _some_ properties using an [override.tf][terraform-override] file. - -Follow these steps to use the `paving` repository: - -1. Clone the repo on the command line: - - ```bash - cd ../ - git clone https://github.com/pivotal/paving.git - ``` - -1. In the checked out repository there are directories for each IaaS. - Copy the terraform templates for the infrastructure of your choice - to a new directory outside of the paving repo, so you can modify it: - - ```bash - # cp -Ra paving/${IAAS} paving-${IAAS} - mkdir paving-${IAAS} - cp -a paving/$IAAS/. paving-$IAAS - cd paving-${IAAS} - ``` - - `IAAS` must be set to match one of the infrastructure directories - at the top level of the `paving` repo - for example, - `aws`, `azure`, `gcp`, or `nsxt`. - -1. Within the new directory, the `terraform.tfvars.example` file - shows what values are required for that IaaS. - Remove the `.example` from the filename, - and replace the examples with real values. - -1. Initialize Terraform which will download the required IaaS providers. - - ```bash - terraform init - ``` - -1. 
Run `terraform refresh` to update the state with what currently exists on the IaaS. - - ```bash - terraform refresh \ - -var-file=terraform.tfvars - ``` - -1. Next, you can run `terraform plan` - to see what changes will be made to the infrastructure on the IaaS. - - ```bash - terraform plan \ - -out=terraform.tfplan \ - -var-file=terraform.tfvars - ``` - -1. Finally, you can run `terraform apply` - to create the required infrastructure on the IaaS. - - ```bash - terraform apply \ - -parallelism=5 \ - terraform.tfplan - ``` - -1. Save off the output from `terraform output stable_config` - into a `vars.yml` file in `your-repo-name` for future use: - - ```bash - terraform output stable_config > ../your-repo-name/vars.yml - ``` - -1. Return to your working directory for the post-terraform steps: - - ```bash - cd ../your-repo-name - ``` - -1. Commit and push the updated `vars.yml` file: - - ```bash - git add vars.yml - git commit -m "Update vars.yml with terraform output" - git push - ``` - -**Manual Installation**: - -VMware has extensive documentation to manually create the resources needed -if you are unable or do not wish to use Terraform. -As with the Terraform solution, however, -there are different docs depending on the IaaS -you are installing Ops Manager onto. - -When going through the documentation required for your IaaS, -be sure to stop before deploying the Ops Manager image. -Platform Automation Toolkit will do this for you. - -- [aws][manual-aws] -- [azure][manual-azure] -- [gcp][manual-gcp] -- [openstack][manual-openstack] -- [vsphere][manual-vsphere] - -_NOTE_: if you need to install an earlier version of Ops Manager, -select your desired version from the dropdown at the top of the page. - -### Creating the Ops Manager VM - -Now that we have an Ops Manager image and the resources required to deploy a VM, -let's add the new task to the `install-opsman` job. 
- -```yaml hl_lines="29-31" -jobs: -- name: install-ops-manager - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: prepare-tasks-with-secrets - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-product - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-ops-manager.yml - - task: create-vm - image: platform-automation-image - file: platform-automation-tasks/tasks/create-vm.yml -``` - -If we try to `fly` this up to Concourse, it will again complain about resources that don't exist. - -So, let's make them. - -Looking over the list of inputs for `create-vm` we still need two required inputs: - -1. `config` -1. `state` - -The optional inputs are vars used with the config, so we'll get to those when we do `config`. - -Let's start with the config file. -We'll write an Ops Manager VM Configuration file to `opsman.yml`. - -The properties available vary by IaaS, for example: - -* IaaS credentials -* networking setup (IP address, subnet, security group, etc) -* ssh key -* datacenter/availability zone/region - -#### Terraform Outputs - -If you used the `paving` repository from the [Creating Resources for Your Ops Manager][creating-resources-for-your-ops-manager] section, -the following steps will result in a filled out `opsman.yml`. - -1. Ops Manager needs to be deployed with IaaS specific configuration. - Platform Automation Toolkit provides a configuration file format that looks like this: - - Copy and paste the YAML below for your IaaS - and save as `opsman.yml`. 
- - === "AWS" - - ```yaml - --8<-- "external/paving/ci/configuration/aws/ops-manager.yml" - ``` - - === "Azure" - - ```yaml - --8<-- "external/paving/ci/configuration/azure/ops-manager.yml" - ``` - - === "GCP" - - ```yaml - --8<-- "external/paving/ci/configuration/gcp/ops-manager.yml" - ``` - - === "vSphere+NSXT" - - ```yaml - --8<-- "external/paving/ci/configuration/nsxt/ops-manager.yml" - ``` - - Where: - {: .tightSpacing } - - * The `((parameters))` in these examples map to outputs from the `terraform-outputs.yml`, - which can be provided via vars file for YAML interpolation in a subsequent step. - - !!! info "`opsman.yml` for an unlisted IaaS" - For a supported IaaS not listed above, - reference the [Platform Automation Toolkit docs](https://docs.pivotal.io/platform-automation/v4.3/inputs-outputs.html#ops-manager-config). - -#### Manual Configuration - -If you created your infrastructure manually -or would like additional configuration options, -these are the acceptable keys for the `opsman.yml` file for each IaaS. - -=== "AWS" - ---excerpt--- "examples/aws-configuration" -=== "Azure" - ---excerpt--- "examples/azure-configuration" -=== "GCP" - ---excerpt--- "examples/gcp-configuration" -=== "Openstack" - ---excerpt--- "examples/openstack-configuration" -=== "vSphere" - ---excerpt--- "examples/vsphere-configuration" - -#### Using the Ops Manager Config file - -Once you have your config file, commit and push it: - -```bash -git add opsman.yml -git commit -m "Add opsman config" -git push -``` - -The `state` input is a placeholder -which will be filled in by the `create-vm` task output. -This will be used later to keep track of the VM so it can be upgraded, -which you can learn about in the [upgrade-how-to][upgrade-how-to]. 
- -Add the following to your `resources` section of your `pipeline.yml` -```yaml -- name: vars - type: git - source: - uri: ((pipeline-repo)) - private_key: ((plat-auto-pipes-deploy-key.private_key)) - branch: main -``` - -This resource definition will allow `create-vm` -to use the variables from `vars.yml` -in the `opsman.yml` file. - -The `create-vm` task in the `install-opsman` will need to be updated to -use the `download-product` image, -Ops Manager configuration file, -variables file, -and the placeholder state file. - -```yaml hl_lines="33-37" -jobs: -- name: install-ops-manager - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - get: vars - - task: prepare-tasks-with-secrets - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-product - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-ops-manager.yml - - task: create-vm - image: platform-automation-image - file: platform-automation-tasks/tasks/create-vm.yml - params: - VARS_FILES: vars/vars.yml - input_mapping: - state: config - image: downloaded-product -``` - -!!! note "Defaults for tasks" - We do not explicitly set the default parameters - for `create-vm` in this example. - Because `opsman.yml` is the default input to - `OPSMAN_CONFIG_FILE`, it is redundant - to set this param in the pipeline. - Refer to the [task definitions][task-reference] for a full range of the - available and default parameters. - -Set the pipeline. 
- -Before we run the job, -we should [`ensure`][ensure] that `state.yml` is always persisted -regardless of whether the `install-opsman` job failed or passed. -To do this, we can add the following section to the job: - -```yaml hl_lines="37-56" -jobs: -- name: install-ops-manager - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: config - - task: prepare-tasks-with-secrets - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-product - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-ops-manager.yml - - task: create-vm - image: platform-automation-image - file: platform-automation-tasks/tasks/create-vm.yml - params: - VARS_FILES: vars/vars.yml - input_mapping: - state: config - image: downloaded-product - ensure: - do: - - task: make-commit - image: platform-automation-image - file: platform-automation-tasks/tasks/make-git-commit.yml - input_mapping: - repository: config - file-source: generated-state - output_mapping: - repository-commit: config-commit - params: - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: state.yml - GIT_AUTHOR_EMAIL: "pcf-pipeline-bot@example.com" - GIT_AUTHOR_NAME: "Platform Automation Toolkit Bot" - COMMIT_MESSAGE: 'Update state file' - - put: config - params: - repository: config-commit - merge: true -``` - -Set the pipeline one final time, -run the job, and see it pass. - -```bash -fly -t control-plane set-pipeline \ - -p foundation \ - -c pipeline.yml -``` - -Commit the final changes to your repository. 
- -```bash -git add pipeline.yml -git commit -m "Install Ops Manager in CI" -git push -``` - -Your install pipeline is now complete. -You are now free to move on to the next steps of your automation journey. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/rotating-certificate-authority.html.md.erb b/docs/how-to-guides/rotating-certificate-authority.html.md.erb new file mode 100644 index 00000000..e6753c86 --- /dev/null +++ b/docs/how-to-guides/rotating-certificate-authority.html.md.erb @@ -0,0 +1,243 @@ +# Writing a pipeline to rotate the foundation Certificate Authority + +This topic shows you how to write a pipeline for rotating the +certificate authority on an existing VMware Tanzu Operations Manager. + +## Prerequisites + +* A pipeline, such as one created in [Installing Tanzu Operations Manager](./installing-opsman.html) + or [Upgrading an existing Tanzu Operations Manager](./upgrade-existing-opsman.html) +* A fully configured Tanzu Operations Manager and Director +* The Platform Automation Toolkit Docker Image [imported and ready to run](./running-commands-locally.html) + +## Creating the pipeline + +You will be creating a new pipeline for this workflow. It will ultimately +rotate the certificate authority for a single foundation via a series of jobs, +but it will start with some resources. + +1. 
Create a file titled `ca-rotation.yml` with this content: + + ```yaml + --- + resource_types: + - name: pivnet + type: docker-image + source: + repository: pivotalcf/pivnet-resource + tag: latest-final + resources: + - name: platform-automation + type: pivnet + source: + product_slug: platform-automation + api_token: ((pivnet-refresh-token)) + - name: config + type: git + source: + uri: ((pipeline-repo)) + private_key: ((plat-auto-pipes-deploy-key.private_key)) + branch: main + ``` + + This will give us access to the Platform Automation tasks and the environment + configuration file. + +1. Set this new pipeline. + + ```bash + fly -t control-plane set-pipeline -p foundation -c ca-rotation.yml + ``` + +## Generating a Root Certificate Authority + +There are two methods to configure a new root certificate authority in Ops +Manager: + +* Use Tanzu Operations Manager to generate a new certificate authority. +* Give Tanzu Operations Manager an existing root certificate authority to use. + +

+<p class="note caution">
+<span class="note__title">Caution</span>
+Use with pivotal-container-service tile:
+Rotating certificate authority with the pivotal-container-service
+tile installed causes warnings in the pipeline. Only certificates
+managed by Tanzu Operations Manager will be rotated in this process.
+</p>

+ +1. For both methods, the first step is to add the job to the pipeline. + + ```yaml + jobs: + - name: configure-new-ca + plan: + - get: env + resource: config + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - task: configure-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-new-certificate-authority.yml + ``` + +1. **Option 1:** Generate a new Certificate Authority. If you want a certificate authority +generated by Tanzu Operations Manager, the job shown in the previous step will create it for you. + +1. **Option 2:** Configure Tanzu Operations Manager to use an existing Certificate Authority. To +provide a certificate authority to the Tanzu Operations Manager to use (for example, +an externally signed certificate), we can provide that to the Tanzu Operations Manager +with an input resource and additional configuration of the task. + + 1. Add an [S3 resource](https://github.com/concourse/s3-resource) and the configuration repo + to the `resources` section: + + ```yaml hl_lines="2-8" + resources: + - name: certificate-authority + type: s3 + source: + access_key_id: ((s3-access-key-id)) + secret_access_key: ((s3-secret-key)) + bucket: ((certificate-authority-bucket)) + regexp: (certificate|privatekey)\.pem + ``` + + 1. Fetch the resource before the task. This will configure the task + to use the `certificate.pem` and `privatekey.pem` files from the s3 resource + as the new CA in Tanzu Operations Manager. + + ```yaml hl_lines="1-2" + - get: certs + resource: certificate-authority + - get: env + resource: config + - task: configure-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-new-certificate-authority.yml + ``` + +1. Apply the changes. 
After configuring a new certificate authority, Tanzu Operations Manager must apply +changes before the new CA is available to generate and sign certificates. This +also registers the CA with components so that they will trust certificates +from the new CA. This next job will be appended after the `configure-new-ca` +job. + + ```yaml + - name: apply-changes + plan: + - get: env + resource: config + passed: + - configure-new-ca + - task: apply-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + ``` + +1. Activate the new Certificate Authority. Set your new certificate authority as the active certificate authority. After this, any certificates created by the CredHub will be signed by the new CA. +Append a new job to the end of the jobs list. + + ```yaml + - name: activate-new-ca + plan: + - get: env + resource: config + passed: + - apply-changes + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - task: activate-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/activate-certificate-authority.yml + ``` + +1. Regenerate the certificates: + + 1. Non-configurable leaf certificates: Now that a new certificate authority is active, any internal, non-configurable certificates need to be regenerated and signed by the new CA. We are going to add another job that will regenerate the non configurable leaf certificates. 
+ + ```yaml + - name: regenerate-non-configurable-leaf-certificates + plan: + - get: env + resource: config + passed: + - activate-new-ca + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - task: regenerate-certificates + image: platform-automation-image + file: platform-automation-tasks/tasks/regenerate-certificates.yml + ``` + + This will delete the existing certificates from CredHub, which causes CredHub to generate new certificates on the next run of Apply Changes. + + 1. Configurable certificates: Any manually configured certificates that are signed by the foundation root certificate authority need to be regenerated as well. Tanzu Platform for Cloud Foundry needs at least two configurable certificates, one for networking components and one for UAA. + After generating a new certificate, it needs to be configured in Tanzu Operations Manager with a manifest file specific to the certificate.
+ See [rotate CAs and leaf certificates](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/security-pcf-infrastructure-rotate-cas-and-leaf-certs.html) to manually rotate the CAs and certificates. + + + +1. Apply changes to create and use the new certificates. + + ```yaml + - name: apply-certificate-changes + plan: + - get: env + resource: config + passed: + - regenerate-non-configurable-leaf-certificates + - task: apply-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + ``` + +## Cleaning up + +After the function of the foundation is validated with new certificates, the old certificate authority can be deleted by adding a job that will cleanup the certificate authority and apply the changes. + +```yaml +- name: cleanup-certificate-authorities + plan: + - get: env + resource: config + passed: + - apply-certificate-changes + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - task: delete-certificate-authority + image: platform-automation-image + file: platform-automation-tasks/tasks/delete-certificate-authority.yml + - task: apply-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml +``` diff --git a/docs/how-to-guides/rotating-certificate-authority.md b/docs/how-to-guides/rotating-certificate-authority.md deleted file mode 100644 index 2984161a..00000000 --- a/docs/how-to-guides/rotating-certificate-authority.md +++ /dev/null @@ -1,235 +0,0 @@ -# Writing a Pipeline to Rotate the Foundation Certificate Authority - -This how-to-guide shows you how to write a pipeline for rotating the -certificate authority on an existing Ops Manager. - -## Prerequisites -1. 
A pipeline, such as one created in [Installing Ops Manager][install-how-to] - or [Upgrading an Existing Ops Manager][upgrade-how-to]. -1. A fully configured Ops Manager and Director. -1. The Platform Automation Toolkit Docker Image [imported and ready to run][running-commands-locally]. - -## Creating the Pipeline -We will be creating a new pipeline for this workflow. It will ultimately -rotate the certificate authority for a single foundation via a series of jobs, -but it will start with some resources. Create a file titled `ca-rotation.yml` -with this content: -```yaml ---- -resource_types: -- name: pivnet - type: docker-image - source: - repository: pivotalcf/pivnet-resource - tag: latest-final -resources: -- name: platform-automation - type: pivnet - source: - product_slug: platform-automation - api_token: ((pivnet-refresh-token)) -- name: config - type: git - source: - uri: ((pipeline-repo)) - private_key: ((plat-auto-pipes-deploy-key.private_key)) - branch: main -``` - -This will give us access to the Platform Automation tasks and the environment -configuration file. - -We will also set this new pipeline. -```bash -fly -t control-plane set-pipeline -p foundation -c ca-rotation.yml -``` - -## Generating a Root Certificate Authority -There are two methods to configure a new root certificate authority in Ops -Manager: - -1. Use Ops Manager to generate a new certificate authority. -1. Give Ops Manager an existing root certificate authority to use. - -!!! warning "Use with `pivotal-container-service` tile" - Rotating certificate authority with the pivotal-container-service - tile installed will cause warnings in the pipeline. Only certificates - managed by Ops Manager will be rotated in this process! - -Regardless of method, we are going to add our first job to the pipeline. 
-```yaml -jobs: -- name: configure-new-ca - plan: - - get: env - resource: config - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - task: configure-new-ca - image: platform-automation-image - file: platform-automation-tasks/tasks/configure-new-certificate-authority.yml -``` - -### Generate a New Certificate Authority -If we want a certificate authority generated by Ops Manager, the above job -will create it for us. - -### Configure Ops Manager to Use an Existing Certificate Authority -If we want to provide a certificate authority to the Ops Manager to use (e.g. -an externally signed certificate), we can provide that to the Ops Manager -with an input resource and additional configuration of the task. - -We'll add an [S3 resource][s3-resource] and the configuration repo -to the `resources` section: -```yaml hl_lines="2-8" -resources: -- name: certificate-authority - type: s3 - source: - access_key_id: ((s3-access-key-id)) - secret_access_key: ((s3-secret-key)) - bucket: ((certificate-authority-bucket)) - regexp: (certificate|privatekey)\.pem -``` -Also, we will fetch the resource before the task. This will configure the task -to use the `certificate.pem` and `privatekey.pem` files from the s3 resource -as the new CA in Ops Manager. -```yaml hl_lines="1-2" - - get: certs - resource: certificate-authority - - get: env - resource: config - - task: configure-new-ca - image: platform-automation-image - file: platform-automation-tasks/tasks/configure-new-certificate-authority.yml -``` - -### Apply Changes -After configuring a new certificate authority, Ops Manager needs to apply -changes before the new CA is available to generate and sign certificates. This -also registers the CA with components so that they will trust certificates -from the new CA. 
This next job will be appended after the `configure-new-ca` -job. -```yaml -- name: apply-changes - plan: - - get: env - resource: config - passed: - - configure-new-ca - - task: apply-new-ca - image: platform-automation-image - file: platform-automation-tasks/tasks/apply-changes.yml -``` - -## Activate the New Certificate Authority -We need to set our new certificate authority as the active certificate authority. After this, any certificates created by the Credhub will be signed by the new CA. -Append a new job to the end of the jobs list. -```yaml -- name: activate-new-ca - plan: - - get: env - resource: config - passed: - - apply-changes - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - task: activate-new-ca - image: platform-automation-image - file: platform-automation-tasks/tasks/activate-certificate-authority.yml -``` - -## Regenerate Certificates - -### Non-configurable Leaf Certificates -Now that a new certificate authority is active, any internal, non-configurable certificates need to be regenerated and signed by the new CA. We are going to add another job that will regenerate the non configurable leaf certificates. -```yaml -- name: regenerate-non-configurable-leaf-certificates - plan: - - get: env - resource: config - passed: - - activate-new-ca - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - task: regenerate-certificates - image: platform-automation-image - file: platform-automation-tasks/tasks/regenerate-certificates.yml -``` -This will delete the existing certificates from Credhub, which causes Credhub to generate new certificates on the next run of Apply Changes. 
- -### Configurable Certificates -Any manually configured certificates that are signed by the foundation root certificate authority need to be regenerated as well. Tanzu Application Service needs at least two configurable certificates, one for networking components and one for UAA. -After generating a new certificate, it needs to be configured in Ops Manager with a manifest file specific to the certificate. - -Please check the docs on how to [rotate CAs and leaf certificates](https://docs.pivotal.io/ops-manager/2-10/security/pcf-infrastructure/rotate-cas-and-leaf-certs.html#rotate-config-after-new-root) to manually rotate. - -### Apply Changes -Now we need to apply changes in order to create and use the new certificates. -```yaml -- name: apply-certificate-changes - plan: - - get: env - resource: config - passed: - - regenerate-non-configurable-leaf-certificates - - task: apply-new-ca - image: platform-automation-image - file: platform-automation-tasks/tasks/apply-changes.yml -``` - -## Cleaning Up -Once the function of the foundation is validated with new certificates, the old certificate authority can be deleted by adding a job that will cleanup the certificate authority and applying the changes. 
-```yaml -- name: cleanup-certificate-authorities - plan: - - get: env - resource: config - passed: - - apply-certificate-changes - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - task: delete-certificate-authority - image: platform-automation-image - file: platform-automation-tasks/tasks/delete-certificate-authority.yml - - task: apply-new-ca - image: platform-automation-image - file: platform-automation-tasks/tasks/apply-changes.yml -``` - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/running-commands-locally.md b/docs/how-to-guides/running-commands-locally.md.html.md.erb similarity index 51% rename from docs/how-to-guides/running-commands-locally.md rename to docs/how-to-guides/running-commands-locally.md.html.md.erb index 1fa69c7a..8881ba08 100644 --- a/docs/how-to-guides/running-commands-locally.md +++ b/docs/how-to-guides/running-commands-locally.md.html.md.erb @@ -1,15 +1,17 @@ +# Running commands locally + This topic describes how to execute commands locally with Docker. -If you wish to use the underlying `om` and `p-automator` CLI tools from your local workstation, -we recommend using docker to execute commands. +If you want to use the underlying `om` and `p-automator` CLI tools from your local workstation, +VMware recommends using Docker to execute commands. -With `p-automator` in particular, using Docker is necessary, -as the IaaS CLIs upon which we depend can be tricky to install. -With `om` it's more a matter of convenience - -you can just as easily [download the binary][om-releases] if it's available for your system. +With `p-automator`, in particular, using Docker is necessary, +because the IaaS CLIs upon which we depend can be tricky to install. 
+With `om`, it's more a matter of convenience,, and +you can just as easily [download the binary](https://github.com/pivotal-cf/om/releases) if it's available for your system. -## Executing Commands +## Executing commands To execute commands in Docker: @@ -19,9 +21,9 @@ To execute commands in Docker: docker import ${PLATFORM_AUTOMATION_IMAGE_TGZ} platform-automation-image ``` - Where `${PLATFORM_AUTOMATION_IMAGE_TGZ}` is the image file downloaded from Tanzu Network. + Where `${PLATFORM_AUTOMATION_IMAGE_TGZ}` is the image file downloaded from the Broadcom Support portal. -2. Then, you can use `docker run` to pass it arbitrary commands. +2. Now you can use `docker run` to pass it arbitrary commands. Here, we're running the `p-automator` CLI to see what commands are available: @@ -30,12 +32,13 @@ To execute commands in Docker: p-automator -h ``` - Note: that this will have access read and write files in your current working directory. - If you need to mount other directories as well, you can add additional `-v` arguments. +

+    This will have read / write access to files in your current working directory.
+    If you need to mount other directories as well, you can add more `-v` arguments.

-## Useful Commands +## Useful commands -### Retrieving Product Staged Configuration +### Retrieving product staged configuration ```bash docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ @@ -43,21 +46,23 @@ To execute commands in Docker: --product-name ${PRODUCT_SLUG} --include-placeholders ``` - `${ENV_FILE}` is the [environment file][env] required for all tasks. - `${PRODUCT_SLUG}` is the name of the product downloaded from [Tanzu Network][tanzu-network]. - The resulting file can then be parameterized, saved, and committed to a config repo. + Where: + * `${ENV_FILE}` is the [environment file](../inputs-outputs.html) required for all tasks. + * `${PRODUCT_SLUG}` is the name of the product downloaded from the [Broadcom Support portal](https://support.broadcom.com/group/ecx/downloads). + +The resulting file can then be parameterized, saved, and committed to a config repo. -### Retrieving Director Configuration +### Retrieving director configuration ```bash docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ om --env ${ENV_FILE} staged-director-config --include-placeholders ``` -### Ops Manager Targeting +### Tanzu Operations Manager targeting -Use environment variables to set what Ops Manager `om` is targeting. -For example: +Use environment variables to set what Tanzu Operations Manager `om` is targeting. +For example: ```bash docker run -it -e "OM_PASSWORD=my-password" --rm -v $PWD:/workspace \ @@ -66,14 +71,14 @@ For example: ``` Note the additional space before the `docker` command. -This ensures the command is not kept in bash history. -The environment variable OM_PASSWORD will overwrite the password value in the `ENV_FILE`. -See the [`om` GitHub page][om] for a full list of supported environment variables. +This ensures that the command is not kept in bash history. +The environment variable `OM_PASSWORD` overwrites the password value in the `ENV_FILE`. 
+See the [`om` GitHub page](https://github.com/pivotal-cf/om) for a full list of supported environment variables. -### Disable Verifiers +### Disable verifiers In cases where verifiers are incorrectly failing for known reasons, -those specific verifiers should be disabled in order to apply changes. +you can disable those specific verifiers so that you can apply the changes. `om` has commands to disable individual verifiers: For director verifiers: @@ -94,12 +99,13 @@ For product verifiers: --product-name ${PRODUCT_NAME} --type ${VERIFIER_TYPE} ``` -Where `${VERIFIER_TYPE}` is the failing verifier -and `${PRODUCT_NAME}` is the metadata name of the associated product. +Where +* `${VERIFIER_TYPE}` is the failing verifier +* `${PRODUCT_NAME}` is the metadata name of the associated product. A list of failed verifiers is available in the output from the Apply Changes attempt. To retrieve a list of currently failing director and product verifiers -without applying changes (from Ops Manager 2.6 forward) run: +without applying changes, run: ```bash docker run -it -e "OM_PASSWORD=my-password" --rm -v $PWD:/workspace \ @@ -107,9 +113,6 @@ without applying changes (from Ops Manager 2.6 forward) run: om --env ${ENV_FILE} pre-deploy-check ``` -The Ops Manager Documentation [has additional details][opsman-verifiers-docs] about managing verifiers. +See the [Tanzu Operations Manager Documentation](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-granular-verifier-control.html) for information about managing verifiers. 
-{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} + diff --git a/docs/how-to-guides/setting-up-s3.html.md.erb b/docs/how-to-guides/setting-up-s3.html.md.erb new file mode 100644 index 00000000..f46a6b5c --- /dev/null +++ b/docs/how-to-guides/setting-up-s3.html.md.erb @@ -0,0 +1,332 @@ +# Setting up S3 for file Storage + +In this topic, you will learn +how to set up an S3 bucket, +how bucket permissions work, +what we can store in a bucket, +and how a pipeline may be set up +to retrieve and store objects. + +## Why use S3? + +* Platform Automation Toolkit uses and produces +file artifacts that are too large to store in git. +For example, many `.pivotal` product files are several gigabytes in size. +Exported installation files may also be quite large. + +* For environments that can't access the greater internet. + This is a common security practice, + but it also means that it's not possible + to connect directly to the [Broadcom Support portal](https://support.broadcom.com/group/ecx/downloads) + to access the latest product versions for your upgrades. + +The integration of S3 and Concourse +makes it possible to store large file artifacts +and retrieve the latest product versions in offline environments. + +With S3, you can place product files and new versions of Operations Manager +into a network "allow-listed" S3 bucket +to be used by Platform Automation Toolkit tasks. +You can even create a [Resources pipeline](../pipelines/resources.html) +that gets the latest version of products +from the Broadcom Support portal and places them into your S3 bucket automatically. + +Alternatively, because your foundation backup may be quite large, +it is advantageous to persist it in a blobstore +automatically through Concourse. +Exported installations can then later be accessed +through the blobstore. 
+Because most object stores implement secure, durable solutions,
+exported installations in buckets
+are easily restorable and persistent.
+
+## Prerequisites
+
+1. An [Amazon Web Services account (AWS)](https://aws.amazon.com/s3/) with access to S3
+

+ S3 blobstore compatibility: Many cloud storage options exist, + including Amazon S3, + Google Storage, + Minio, + and Azure Blob Storage. + However, not all object stores are "S3 compatible." + Because Amazon defines the S3 API for accessing blobstores, + and because the Amazon S3 product has emerged as the dominant blob storage solution, + not all "S3 compatible" object stores have exactly the same behavior. + In general, if a storage solution claims to be "S3 compatible," + it should work with the Concourse S3 resource integration. + But note that it may behave differently if interacting directly with the S3 API. + Use the documentation for your preferred blobstore solution + when setting up storage.

+ +1. Set up S3. With your AWS account, navigate to [the S3 console](https://aws.amazon.com/console/) +and sign up for S3. Follow the on-screen prompts. +Now you are ready for buckets. + +

+ AWS root user: + When you sign up for the S3 service on Amazon, + the account with the email and password you use + is the AWS account root user. + As a best practice, you should not use the root user + to access and manipulate services. + Instead, use AWS Identity and Access Management (IAM) + to create and manage users. + For more information about how this works, + see the Amazon IAM guide. +
+ For simplicity, the rest of this guide uses the AWS root user + to show how a bucket can be set up and used with Platform Automation Toolkit.

+ + +## Your first bucket + +S3 stores data as objects in buckets. +An object is any file that can be stored on a file system. +Buckets are the containers for objects. +Buckets can have permissions for who can +create, write, delete, and see objects in that bucket. + +1. Go to [the S3 console](https://aws.amazon.com/console/). +2. Click **Create bucket**. +3. Enter a DNS-compliant name for your new bucket. + - This name must be unique across all of AWS S3 buckets + and adhere to general URL guidelines. + Make it something meaningful and memorable. +4. Enter the **Region** you want the bucket to reside in. +5. Click **Create**. + +This creates a bucket with the default S3 settings. +Bucket permissions and settings +can be set during bucket creation or changed later. +Bucket settings can even be copied from other buckets you have. +For a detailed look at creating buckets +and managing initial settings, see +[Creating a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html). + +## Bucket permissions + +By default, only the AWS account owner +can access S3 resources, including buckets and objects. +The resource owner may allow public access, +allow specific IAM users permissions, +or create a custom access policy. + +To view bucket permissions, +from the S3 console, +look at the "Access" column. + +Amazon S3 has the following Access permissions: + +- **Public**: Everyone has access to one or more of the following: +List objects, Write objects, Read and write permissions. +- **Objects can be public**: The bucket is not public, +but anyone with appropriate permissions can grant public access to objects. +- **Buckets and objects not public**: The bucket and objects do not have any public access. +- **Only authorized users of this account**: Access is isolated to IAM users and roles. + +To change who can access buckets or objects in buckets: + +1. Go to [the S3 console](https://aws.amazon.com/console/). +2. 
Select the name of the bucket you created in the previous step. +3. In the top row, select **Permissions**. + +In this tab, +you can set the various permissions +for an individual bucket. +For simplicity, in this guide, we will use public permissions +so that Concourse can access the files. + +1. Under the permissions tab for a bucket, choose **Public access settings**. +1. Click **Edit** to change the public access settings. +1. Unselect all check boxes to allow public access. + +In general, the credentials being used +to access an S3 compatible blobstore through Concourse +must have `Read` and `Write` permissions. +It is possible to use different user roles +with different credentials to separate users who can `Read` +objects from the bucket and users who can `Write` objects to the bucket. + +

+Amazon S3 provides many +permission settings for buckets. +Specific IAM users can have access and objects can have their own permissions. In addition, buckets can have their own custom policies. +See Configuring ACLs. +Refer to your organization's security policy for the +best way to set up your S3 bucket.

+ +## Object versions + +By default, an S3 bucket will be unversioned. +An unversioned bucket will not allow different versions of the same object. +In order to take advantage of using an S3 bucket with Platform Automation Toolkit, +we will want to enable versioning. Enabling versioning is not required, +but versioning does make the process easier, +and will require less potential manual steps around naming updates to the new file +whenever they are changed. + +1. Go to the [S3 console](https://aws.amazon.com/console/). +2. Select the name of the bucket you created in the previous step. +3. Click the **Properties** tab. +4. Click the **Versioning** tile. +5. Select **Enable Versioning**. + +Now that versioning is enabled, +we can store multiple versions of a file. +For example, given the following object: +``` +my-exported-installation.zip +``` +We can now have multiple versions of this object stored in our S3 bucket: +``` +my-exported-installation.zip (version 111111) +my-exported-installation.zip (version 121212) +``` + +## Storing files in S3 + +Any file that can be stored on a computer +can be stored on S3. S3 is especially good at storing large files +because it is designed to scale with large amounts of data while +still being durable and fast. + +Platform Automation Toolkit users may want to store the following files in S3: + +- `.pivotal` product files +- `.tgz` stemcell files +- `.ova` Operations Manager files +- `.zip` foundation exports + +You should probably **_not_** store the following in S3: + +- `.yaml` configuration files - Git is better suited for this +- `secrets.yaml` environment and secret files - There are a number of ways +to handle these types of files, but they should not be stored in S3. +See [Using a secrets store to store credentials](../concepts/secrets-handling.html) +for information about working with these types of files. + +## Structuring your bucket + +Buckets can have folders and any number of sub-folders. 
+The following sample shows one way to set up your bucket file structure: + +``` +├── foundation-1 +│   ├── products +│   │   ├── healthwatch +│ │ │ healthwatch.pivotal +│   │   ├── pas +│ │ │ pas.pivotal +│   │   └── ... +│ │ +│   ├── stemcells +│   │   ├── healthwatch-stemcell +│ │ │ ubuntu-xenial.tgz +│   │   ├── pas-stemcell +│ │ │ ubuntu-xenial.tgz +│   │   └── ... +│ │ +│ ├── foundation1-exports +│      foundation1-installation.zip + +``` + +When viewing a bucket in the AWS S3 console, +click **Create Folder**. +To create a sub-folder, select **Create Folder** again. + +When attempting to access a specific object in a folder, +include the folder structure before the object name: + +``` +foundation1/products/healthwatch/my-healthwatch-product.pivotal +``` + +## Using a bucket + +When using the [Concourse S3 Resource](https://github.com/concourse/s3-resource), +several configuration properties are available +for retrieving objects. The bucket name is required. + +For your Concourse to have access to your S3 bucket, +ensure that you have the appropriate firewall and networking settings +to allow your Concourse instance to +make requests to your bucket. +Concourse uses various "outside" resources +to perform certain jobs. +Ensure that Concourse can communicate with your S3 bucket. + + +## Reference resources pipeline + +The [resources pipeline](../pipelines/resources.html) +may be used to download dependencies from the Broadcom Support portal +and place them into a trusted S3 bucket. +The various `resources_types` use the [Concourse S3 Resource type](https://github.com/concourse/s3-resource) +and several Platform Automation Toolkit tasks to accomplish this. +The following is an S3-specific breakdown of these components +and where to find more information. + +#### The download-product task + +The [`download-product`](../tasks.html#download-product) task lets you download products from the Broadcom Support portal. 
+If S3 properties are set in the [download config](../inputs-outputs.html#download-product-config), +these files can be put into an S3 bucket. + +If S3 configurations are set, +this task will perform a specific filename operation +that will prepend metadata to the filename. +If you are downloading: + +* product `Example Product version 2.2.1` from the Broadcom Support portal +* with product slug `example-product` +* and version is `2.2.1` + +When downloaded directly from the Broadcom Support portal, the file might look like this: + +``` +product-2.2-build99.pivotal +``` + +Because the Broadcom Support portal file names +do not always have the necessary metadata required by Platform Automation Toolkit, +the download product task will prepend the necessary information +to the filename before it is placed in the S3 bucket: + +``` +[example-product,2.2.1-build99]product-2.2-build99.pivotal +``` + +

+Do not change the meta information prepended by `download-product`.
+This information is required
+if using a `download-product` with a blobstore (that is, AWS, GCS)
+to properly parse product versions.
+
+If placing a product file into a blobstore bucket manually,
+ensure that it has the proper file name format;
+opening bracket, the product slug, a single comma, the product's version, and finally, closing bracket.
+There should be no spaces between the two brackets.
+For example, for a product with slug of `product-slug` and version of `1.1.1`:
+
+ +[product-slug,1.1.1]original-filename.pivotal + +

+ +The [`download-product`](../tasks.html#download-product) +task lets you download products from an blobstore bucket if you define the `SOURCE` param. +The prefixed metadata added by `download-product` with `SOURCE: pivnet` is used to find the appropriate file. +This task uses the same [download-product config file](../inputs-outputs.html#download-product-config) +as `download-product` to ensure consistency +between what is `put` in the blobstore +and what is being accessed later. + +`download-product` with `SOURCE: pivnet` and `download-product` with `SOURCE: s3|gcs|azure` are designed +to be used together. +The download product config should be different between the two tasks. + +For complete information on this task +and how it works, see the [Task Reference](../tasks.html#download-product). diff --git a/docs/how-to-guides/setting-up-s3.md b/docs/how-to-guides/setting-up-s3.md deleted file mode 100644 index c116f6dd..00000000 --- a/docs/how-to-guides/setting-up-s3.md +++ /dev/null @@ -1,351 +0,0 @@ -In this guide, -you will learn -how to set up an S3 bucket, -how bucket permissions work, -what we can store in a bucket, -and how a pipeline may be set up -to retrieve and store objects. - -## Why use S3? - -* Platform Automation Toolkit uses and produces -file artifacts that are too large to store in git. -For example, many `.pivotal` product files are several gigabytes in size. -Exported installation files may also be quite large. - -* For environments that can't access the greater internet. - This is a common security practice, - but it also means that it's not possible - to connect directly to Tanzu Network - to access the latest product versions for your upgrades. - -S3 and Concourse's native S3 integration -makes it possible to store large file artifacts -and retrieve the latest product versions in offline environments. 
- -With S3, we can place product files -and new versions of OpsMan -into a network allow-listed S3 bucket -to be used by Platform Automation Toolkit tasks. -We can even create a [Resources Pipeline][reference-resources] -that gets the latest version of products -from Tanzu Network and places them into our S3 bucket automatically. - -Alternatively, because a foundation's backup -may be quite large, -it is advantageous to persist it in a blobstore -automatically through Concourse. -Exported installations can then later be accessed -through the blobstore. -Because most object stores -implement secure, durable solutions, -exported installations in buckets -are easily restorable -and persistent. - -## Prerequisites - -1. An [Amazon Web Services account][amazon-s3] (commonly referred to as AWS) with access to S3 - -!!! info "S3 blobstore compatibility" - Many cloud storage options exist - including [Amazon S3][amazon-s3], - [Google Storage][gcp-storage], - [Minio][minio], - and [Azure Blob Storage][azure-blob-storage]. - However, not all object stores - are "S3 compatible". - Because Amazon defines the - S3 API for accessing blobstores, - and because the Amazon S3 product has emerged as the dominant blob storage solution, - not all "S3 compatible" object stores act exactly the same. - In general, if a storage solution claims to be "S3 compatible", - it should work with the [Concourse's S3 resource integration][concourse-s3-resource]. - But note that it may behave differently if interacting directly with the S3 API. - Defer to the documentation of your preferred blobstore solution - when setting up storage. - -2. Set up S3. With your AWS account, -navigate to [the S3 console][amazon-s3-console] -and sign up for S3. -Follow the on screen prompts. -Now you are ready for buckets! - -!!! tip "AWS Root User" - When you sign up for the S3 service on Amazon, - the account with the email and password you use - is the AWS account root user. 
- As a best practice, - you should not use the root user - to access and manipulate services. - Instead, use [AWS Identity and Access Management][amazon-iam] - (commonly refered to as IAM) - to create and manage users. - For more info on how this works, - check out this [guide from Amazon][amazon-iam-guide]. - - For simplicity, in the rest of this guide, - we will use the AWS root user - to show how a bucket may be set up and used with Platform Automation Toolkit. - - -## Your First Bucket - -S3 stores data as objects within buckets. -An object is any file that can be stored on a file system. -Buckets are the containers for objects. -Buckets can have permissions for who can -create, write, delete, and see objects within that bucket. - -1. Navigate to [the S3 console][amazon-s3-console] -1. Click the "Create bucket" button -1. Enter a DNS-compliant name for your new bucket - - This name must be unique across all of AWS S3 buckets - and adhere to general URL guidelines. - Make it something meaningful and memorable! -1. Enter the "Region" you want the bucket to reside in -1. Choose "Create" - -This creates a bucket with the default S3 settings. -Bucket permissions and settings -can be set during bucket creation or changed afterwards. -Bucket settings can even be copied from other buckets you have. -For a detailed look at creating buckets -and managing initial settings, -check out [this documentation on creating buckets.][amazon-s3-create-bucket] - -## Bucket Permissions - -By default, only the AWS account owner -can access S3 resources, including buckets and objects. -The resource owner may allow public access, -allow specific IAM users permissions, -or create a custom access policy. - -To view bucket permissions, -from the S3 console, -look at the "Access" column. 
- -Amazon S3 has the following Access permissions: - -- *Public* - Everyone has access to one or more of the following: -List objects, Write objects, Read and write permissions -- *Objects can be public* - The bucket is not public. -But anyone with appropriate permissions can grant public access to objects. -- *Buckets and objects not public* - The bucket and objects do not have any public access. -- *Only authorized users of this account* - Access is isolated to IAM users and roles. - -In order to change who can access buckets or objects in buckets: - -1. Navigate to [the S3 console][amazon-s3-console]. -1. Choose the name of the bucket you created in the previous step -1. In the top row, choose "Permissions" - -In this tab, -you can set the various permissions -for an individual bucket. -For simplicity, in this guide, we will use public permissions -for Concourse to access the files. - -1. Under the permissions tab for a bucket, choose "Public access settings" -1. Choose "Edit" to change the public access settings -1. Uncheck all boxes to allow public access. - -In general, the credentials being used -to access an S3 compatible blobstore through Concourse -must have `Read` and `Write` permissions. -It is possible to use different user roles -with different credentials -to seperate which user can `Read` -objects from the bucket -and which user can `Write` objects to the bucket. - -!!! Info "Permissions" - Amazon S3 provides many [permission settings for buckets][amazon-s3-permissions]. - Specific [IAM users can have access][amazon-s3-permissions-iam]. - Objects can have [their own permissions][amazon-s3-permissions-objects]. - And buckets can even have their own [custom Bucket Policies][amazon-s3-permissions-policies]. - Refer to your organization's security policy - to best set up your S3 bucket. - -## Object Versions - -By default, -an S3 bucket will be _unversioned_. -An unversioned bucket will not allow different versions of the same object. 
-In order to take advantage of using an S3 bucket with Platform Automation Toolkit, -we will want to enable versioning. Enabling versioning is not required, -but versioning does make the process easier, -and will require less potential manual steps around naming updates to the new file -whenever they are changed. - -1. Navigate to [the S3 console][amazon-s3-console] -1. Choose the name of the bucket you created in the previous step -1. Select the "Properties" tab -1. Click the "Versioning" tile -1. Check the "Enable Versioning" - -Now that versioning is enabled, -we can store multiple versions of a file. -For example, given the following object: -``` -my-exported-installation.zip -``` -We can now have multiple versions of this object stored in our S3 bucket: -``` -my-exported-installation.zip (version 111111) -my-exported-installation.zip (version 121212) -``` - -## Storing Files in S3 - -Any file that can be stored on a computer -can be stored on S3. S3 is especially good at storing large files as it is designed to scale with large amounts of data while still being durable and fast. - -Platform Automation Toolkit users may want to store the following files in S3: - -- `.pivotal` product files -- `.tgz` stemcell files -- `.ova` Ops Manager files -- `.zip` foundation exports - -Platform Automation Toolkit users will likely **_NOT_** want to store the following in S3: - -- `.yaml` configuration files - Better suited for [git][git] -- `secrets.yaml` environment and secret files - There are a number of ways -to handle these types of files, -but they should not be stored in S3. -Check out the [Secrets Handling page][secrets-handling] -for how to work with these types of files. - -## Structuring your Bucket - -Like any computer, buckets can have folders -and any number of sub-folders. 
-The following is one way to set up your bucket's file structure: - -``` -├── foundation-1 -│   ├── products -│   │   ├── healthwatch -│ │ │ healthwatch.pivotal -│   │   ├── pas -│ │ │ pas.pivotal -│   │   └── ... -│ │ -│   ├── stemcells -│   │   ├── healthwatch-stemcell -│ │ │ ubuntu-xenial.tgz -│   │   ├── pas-stemcell -│ │ │ ubuntu-xenial.tgz -│   │   └── ... -│ │ -│ ├── foundation1-exports -│      foundation1-installation.zip - -``` - -When viewing a bucket in the AWS S3 console, -simple select "Create Folder". -To create a sub-folder, -when viewing a specific folder, -select "Create Folder" again. - -When attempting to access a specific object in a folder, -simply include the folder structure before the object name: - -``` -foundation1/products/healthwatch/my-healthwatch-product.pivotal -``` - -## Using a Bucket - -When using the [Concourse S3 Resource][concourse-s3-resource], -several configuration properties are available -for retrieving objects. The bucket name is required. - -!!! Info "On networking and accessing a bucket" - In order for your Concourse - to have access to your S3 bucket, - ensure that you have the appropriate firewall and networking settings - for your Concourse instance to - make requests to your bucket. - Concourse uses various "outside" resources - to perform certain jobs. - Ensure that Concourse can "talk" to your S3 bucket. - - -## Reference Resources Pipeline - -The [resources pipeline][reference-resources] -may be used to download dependencies from Tanzu Network -and place them into a trusted S3 bucket. -The various `resources_types` use the [Concourse S3 Resource type][concourse-s3-resource] -and several Platform Automation Toolkit tasks to accomplish this. -The following is an S3-specific breakdown of these components -and where to find more information. - -#### The download-product task - -The [`download-product`][download-product] task lets you download products from Tanzu Network. 
-If S3 properties are set in the [download config][download-product-config], -these files can be placed into an S3 bucket. - -If S3 configurations are set, -this task will perform a specific filename operation -that will prepend meta data to the filename. -If downloading the product `Example Product version 2.2.1` from Tanzu Network -where the product slug is `example-product` and the version is `2.2.1`, -when directly downloaded from Tanzu Network, the file may appear as: - -``` -product-2.2-build99.pivotal -``` - -Because Tanzu Network file names -do not always have the necessary metadata required by Platform Automation Toolkit, -the download product task will prepend the necessary information -to the filename before it is placed into the S3 bucket: - -``` -[example-product,2.2.1-build99]product-2.2-build99.pivotal -``` - -For complete information on this task -and how it works, refer to the [download-product task reference.][download-product] - -!!! warning "Changing S3 file names" - Do not change the meta information - prepended by `download-product`. - This information is required - if using a `download-product` with a blobstore (i.e. aws, gcs) - in order to properly parse product versions. - - If placing a product file into an blobstore bucket manually, - ensure that it has the proper file name format; - opening bracket, the product slug, a single comma, the product's version, and finally, closing bracket. - There should be no spaces between the two brackets. - For example, for a product with slug of `product-slug` and version of `1.1.1`: - ``` - [product-slug,1.1.1]original-filename.pivotal - ``` - -#### The download-product task -The [`download-product`][download-product] -task lets you download products from an blobstore bucket if you define the `SOURCE` param. -The prefixed metadata added by `download-product` with `SOURCE: pivnet` is used to find the appropriate file. 
-This task uses the same [download-product config file][download-product-config] -as `download-product` to ensure consistency -across what is `put` in the blobstore -and what is being accessed later. -`download-product` with `SOURCE: pivnet` and `download-product` with `SOURCE: s3|gcs|azure` are designed -to be used together. -The download product config should be different between the two tasks. - -For complete information on this task -and how it works, refer to the [download-product task reference.][download-product] - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/how-to-guides/upgrade-existing-opsman.html.md.erb b/docs/how-to-guides/upgrade-existing-opsman.html.md.erb new file mode 100644 index 00000000..f9da20bc --- /dev/null +++ b/docs/how-to-guides/upgrade-existing-opsman.html.md.erb @@ -0,0 +1,906 @@ +# Writing a pipeline to upgrade an existing VMware Tanzu Operations Manager + +This how-to-guide shows you how to create a pipeline for upgrading an existing Vmware Tanzu Operations Manager VM. +If you don't have a Tanzu Operations Manager VM, see [Installing Tanzu Operations Manager](./installing-opsman.html). + +<%= partial "getting-started" %> + +## Exporting the installation + +Before upgrading Tanzu Operations Manager, +you must first download and persist +an export of the current installation. + +

+VMware strongly recommends automatically exporting +the Tanzu Operations Manager installation +and persisting it to your blobstore on a regular basis. +This ensures that if you need to upgrade (or restore) +your Tanzu Operations Manager for any reason, +you'll have the latest installation info available. +A time trigger is added later in this tutorial +to help with this.

+
+1. First, switch out the test job
+for one that exports the existing Tanzu Operations Manager installation.
+Do this by changing:
+
+    - the `name` of the job
+    - the `name` of the task
+    - the `file` of the task
+
+    The first task in the job should be [`export-installation`](../tasks.html#export-installation).
+    It has an additional required input;
+    the `env` file used to talk to Tanzu Operations Manager.
+
+1. Before writing that file and making it available as a resource,
+`get` it (and reference it in the params)
+as if it's there.
+
+    It also has an additional output (the exported installation).
+    It is persisted to the blobstore
+    by the `put` at the end of the job.
+
+1. Finally, while it's fine for `test` to run in parallel,
+the export process shouldn't, so
+you also need to add `serial: true` to the job.
+
+    ```yaml hl_lines="2 3 15-21"
+    jobs:
+    - name: export-installation
+      serial: true
+      plan:
+      - get: platform-automation-image
+        resource: platform-automation
+        params:
+          globs: ["*image*.tgz"]
+          unpack: true
+      - get: platform-automation-tasks
+        resource: platform-automation
+        params:
+          globs: ["*tasks*.zip"]
+          unpack: true
+      - get: env
+      - task: export-installation
+        image: platform-automation-image
+        file: platform-automation-tasks/tasks/export-installation.yml
+      - put: installation
+        params:
+          file: installation/installation-*.zip
+    ```
+
+
+    1. If you try to `fly` this up to Concourse,
+    it will again throw errors about resources that don't exist,
+    so the next step is to make them.
+    The first new resource you need is the `env` file.
+    2. Push your git repo to a remote on GitHub
+    to make this (and later, other) configuration available to the pipelines.
+    GitHub has good [instructions](https://docs.github.com/en/migrations/importing-source-code/using-the-command-line-to-import-source-code/adding-locally-hosted-code-to-github)
+    you can follow to create a new repository on GitHub.
+    You can skip over the part
+    about using `git init` to set up your repo,
+    since you did that earlier.
+
+1. Now set up your remote
+and use `git push` to make it available.
+You will use this repository to hold your single-foundation-specific configuration.
+These instructions use the ["Single repository for each Foundation"](../pipeline-design/configuration-management-strategies.html#single-foundation-pattern)
+pattern to structure the configurations.
+
+1. Add the repository URL to `vars.yml` so that you can reference it
+later when you declare the corresponding resource.
+
+    ```yaml
+    pipeline-repo: git@github.com:username/your-repo-name
+    ```
+
+1. Write an `env.yml` for your Tanzu Operations Manager.
+
+    `env.yml` holds authentication and target information
+    for a particular Tanzu Operations Manager.
+
+    An example `env.yml` for username/password authentication
+    is shown below with the required properties.
+    Please reference [Configuring Env](./configuring-env.html) for the entire list of properties
+    that can be used with `env.yml`
+    as well as an example of an `env.yml`
+    that can be used with UAA (SAML, LDAP, etc.) authentication.
+
+    The property `decryption-passphrase` is required for `import-installation`,
+    and therefore required for `upgrade-opsman`.
+
+    If your foundation uses authentication other than basic auth,
+    see [Inputs and Outputs](../inputs-outputs.html)
+    for more detail on UAA-based authentication.
+
+
+    ```yaml
+    target: ((opsman-url))
+    username: ((opsman-username))
+    password: ((opsman-password))
+    decryption-passphrase: ((opsman-decryption-passphrase))
+    ```
+
+1. Add and commit the new `env.yml` file:
+
+    ```bash
+    git add env.yml
+    git commit -m "Add environment file for foundation"
+    git push
+    ```
+
+2. Now that the env file is in your git remote,
+you can add a resource to tell Concourse how to get it as `env`.
+
+    Since this is (probably) a private repo,
+    you must create a deploy key Concourse can use to access it.
+ Follow the [GitHub instructions](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/managing-deploy-keys#deploy-keys) + for creating a deploy key. + +1. Place the private key in CredHub so you can use it in your pipeline: + + ```bash + # note the starting space + credhub set \ + --name /concourse/your-team-name/plat-auto-pipes-deploy-key \ + --type ssh \ + --private the/filepath/of/the/key-id_rsa \ + --public the/filepath/of/the/key-id_rsa.pub + ``` + +2. Add this to the resources section of your pipeline file: + + ```yaml + - name: env + type: git + source: + uri: ((pipeline-repo)) + private_key: ((plat-auto-pipes-deploy-key.private_key)) + branch: main + ``` + +3. Put the credentials in CredHub: + + ```bash + # note the starting space throughout + credhub set \ + -n /concourse/your-team-name/foundation/opsman-username \ + -t value -v your-opsman-username + credhub set \ + -n /concourse/your-team-name/foundation/opsman-password \ + -t value -v your-opsman-password + credhub set \ + -n /concourse/your-team-name/foundation/opsman-decryption-passphrase \ + -t value -v your-opsman-decryption-passphrase + ``` + + <%= partial "./paths-and-pipeline-names" %> + +1. To perform interpolation in one of your input files, +you need the [`credhub-interpolate` task](../tasks.html#credhub-interpolate) +Earlier, you relied on Concourse's native integration with CredHub for interpolation. +That worked because you needed to use the variable +in the pipeline itself, not in one of your inputs. 
+ + You can add it to your job + after retrieving your `env` input, + but before the `export-installation` task: + + ```yaml hl_lines="16-26" + jobs: + - name: export-installation + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: env + - task: credhub-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/credhub-interpolate.yml + params: + CREDHUB_CLIENT: ((credhub-client)) + CREDHUB_SECRET: ((credhub-secret)) + CREDHUB_SERVER: https://your-credhub.example.com + PREFIX: /concourse/your-team-name/foundation + input_mapping: + files: env + output_mapping: + interpolated-files: interpolated-env + - task: export-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/export-installation.yml + input_mapping: + env: interpolated-env + - put: installation + params: + file: installation/installation-*.zip + ``` + +

+    The `credhub-interpolate` task for this job
+    maps the output from the task (`interpolated-files`)
+    to `interpolated-env`.
+    This can be used by the next task in the job
+    to more explicitly define the inputs/outputs of each task.
+    It is also okay to leave the output as `interpolated-files`
+    if it is appropriately referenced in the next task.

+
+    Notice the [input mappings](https://concourse-ci.org/jobs.html#schema.step.task-step.input_mapping)
+    of the `credhub-interpolate` and `export-installation` tasks.
+    This allows you to use the output of one task
+    as an input of another.
+
+    An alternative to `input_mappings` is discussed in
+    [Configuration Management Strategies](../pipeline-design/configuration-management-strategies.html#advanced-pipeline-design).
+
+1. Put your `credhub_client` and `credhub_secret` into CredHub,
+so Concourse's native integration can retrieve them
+and pass them as configuration to the `credhub-interpolate` task.
+
+    ```bash
+    # note the starting space throughout
+    credhub set \
+    -n /concourse/your-team-name/credhub-client \
+    -t value -v your-credhub-client
+    credhub set \
+    -n /concourse/your-team-name/credhub-secret \
+    -t value -v your-credhub-secret
+    ```
+
+    Now, the `credhub-interpolate` task
+    will interpolate your env input,
+    and pass it to `export-installation` as `env`.
+
+1. The other new resource you need is a blobstore,
+so you can persist the exported installation.
+
+    Add an [S3 resource](https://github.com/concourse/s3-resource)
+    to the `resources` section:
+
+    ```yaml
+    - name: installation
+      type: s3
+      source:
+        access_key_id: ((s3-access-key-id))
+        secret_access_key: ((s3-secret-key))
+        bucket: ((platform-automation-bucket))
+        regexp: installation-(.*).zip
+    ```
+
+1. Save the credentials in CredHub:
+
+    ```bash
+    # note the starting space throughout
+    credhub set \
+    -n /concourse/your-team-name/s3-access-key-id \
+    -t value -v your-bucket-s3-access-key-id
+    credhub set \
+    -n /concourse/your-team-name/s3-secret-key \
+    -t value -v your-s3-secret-key
+    ```
+
+1. This time (and in the future),
+when you set the pipeline with `fly`,
+you need to load vars from `vars.yml`.
+
+    ```bash
+    # note the space before the command
+    fly -t control-plane set-pipeline \
+    -p foundation \
+    -c pipeline.yml \
+    -l vars.yml
+    ```
+
+1. 
Manually trigger a build. This time, it should pass. + +

+ You'll be using this, + the ultimate form of the fly command to set your pipeline, + for the rest of the tutorial. +
+    You can save yourself some typing by using your bash history
+    (if you did not prepend your command with a space).
+    You can cycle through previous commands with the up and down arrows.
+    Alternatively,
+    Ctrl-r will search your bash history.
+    After pressing Ctrl-r, type `fly`,
+    and you will see the last `fly` command you ran.
+    Run it with Enter, or
+    instead of running it,
+    use Ctrl-r again
+    to see the matching command before that.

+ +1. This is a good commit point: + + ```bash + git add pipeline.yml vars.yml + git commit -m "Export foundation installation in CI" + git push + ``` + +## Performing the upgrade + +Now that you have an exported installation, +it's time to create another Concourse job to do the upgrade itself. +The export and the upgrade need to be in separate jobs +so they can be triggered (and re-run) independently. + +This new job uses the [`upgrade-opsman`](../tasks.html#upgrade-opsman) task. + +1. Write a new job that has `get` steps +for your platform-automation resources +and all the inputs you already know how to get: + + ```yaml + - name: upgrade-opsman + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: env + - get: installation + ``` + +2. Do a commit here. The job doesn't do anything useful yet, but +it's a good place to start. + + ```bash + git add pipeline.yml + git commit -m "Set up initial gets for upgrade job" + git push + ``` + +

+    We recommend frequent, small commits that can be set with `fly` and,
+    ideally, go green.
+
+    This one doesn't actually do anything though, right?
+    Fair, but: setting and running the job
+    gives you feedback on your syntax and variable usage.
+    It can catch typos, resources you forgot to add or misnamed, and so on.
+    Committing when you get to a working point helps keep the diffs small,
+    and the history tractable.
+    Also, down the line, if you've got more than one pair working on a foundation,
+    the small commits help you keep off one another's toes.
+
+ This workflow is not demonstrated here, + but it can even be useful to make a commit, + use fly to see if it works, + and then push it if and only if it works. + If it doesn't, you can use git commit --amend + once you've figured out why and fixed it. + This workflow makes it easy to keep what is set on Concourse + and what is pushed to your source control remote in sync.

+
+1. You need the three required inputs for [`upgrade-opsman`](../tasks.html#upgrade-opsman).
+
+    * `state`
+    * `config`
+    * `image`
+
+    There are optional inputs, vars used with the config,
+    and you can add those when you do `config`.
+
+2. Start with the [state file](../inputs-outputs.html#state).
+Record the `iaas` you're using and the ID of the _currently deployed_ Tanzu Operations Manager VM.
+Different IaaS uniquely identify VMs differently;
+here are examples for what this file should look like,
+depending on your IaaS:
+
+    **AWS**
+
+    <%= partial "../examples/state/aws" %>
+
+    **Azure**
+
+    <%= partial "../examples/state/azure" %>
+
+    **GCP**
+
+    <%= partial "../examples/state/gcp" %>
+
+    **OpenStack**
+
+    <%= partial "../examples/state/openstack" %>
+
+    **vSphere**
+
+    <%= partial "../examples/state/vsphere" %>
+
+1. Choose the example for your IaaS,
+write it in your repo as `state.yml`,
+commit it, and push it:
+
+    ```bash
+    git add state.yml
+    git commit -m "Add state file for foundation Ops Manager"
+    git push
+    ```
+
+    You can map the `env` resource to the [`upgrade-opsman`](../tasks.html#upgrade-opsman)
+    `state` input after you add the task.
+
+    But first, there are two more inputs to arrange for.
+
+1. Option 1: Write a [Tanzu Operations Manager VM Configuration file](../inputs-outputs.html#opsman-config)
+to `opsman.yml`.
+The properties available vary by IaaS,
+but you can often inspect your existing Tanzu Operations Manager
+in your IaaS console
+(or, if your Tanzu Operations Manager was created with Terraform,
+look at your Terraform outputs)
+to find the necessary values.
+
+    **AWS**
+
+      
+        ---
+        opsman-configuration:
+          aws:
+            region: us-west-2
+            vpc_subnet_id: subnet-0292bc845215c2cbf
+            security_group_ids: [ sg-0354f804ba7c4bc41 ]
+            key_pair_name: ops-manager-key  # used to SSH to VM
+            iam_instance_profile_name: env_ops_manager
+
+            # At least one IP address (public or private) needs to be assigned to the
+            # VM. It is also permissible to assign both.
+            public_ip: 1.2.3.4      # Reserved Elastic IP
+            private_ip: 10.0.0.2
+
+            # Optional
+            # vm_name: ops-manager-vm    # default - ops-manager-vm
+            # boot_disk_size: 100        # default - 200 (GB)
+            # instance_type: m5.large    # default - m5.large
+                                        # NOTE - not all regions support m5.large
+            # assume_role: "arn:aws:iam::..." # necessary if a role is needed to authorize
+                                              # the OpsMan VM instance profile
+            # tags: {key: value}              # key-value pair of tags assigned to the
+            #                                 # Ops Manager VM
+            # Omit if using instance profiles
+            # And instance profile OR access_key/secret_access_key is required
+            # access_key_id: ((access-key-id))
+            # secret_access_key: ((secret-access-key))
+
+            # security_group_id: sg-123  # DEPRECATED - use security_group_ids
+            # use_instance_profile: true # DEPRECATED - will use instance profile for
+                                        # execution VM if access_key_id and
+                                        # secret_access_key are not set
+
+          # Optional Ops Manager UI Settings for upgrade-opsman
+          # ssl-certificate: ...
+          # pivotal-network-settings: ...
+          # banner-settings: ...
+          # syslog-settings: ...
+          # rbac-settings: ...
+      
+    
+
+    **Azure**
+
+      
+        ---
+        opsman-configuration:
+          azure:
+            tenant_id: 3e52862f-a01e-4b97-98d5-f31a409df682
+            subscription_id: 90f35f10-ea9e-4e80-aac4-d6778b995532
+            client_id: 5782deb6-9195-4827-83ae-a13fda90aa0d
+            client_secret: ((opsman-client-secret))
+            location: westus
+            resource_group: res-group
+            storage_account: opsman                       # account name of container
+            ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAZ...  # ssh key to access VM
+
+            # Note that there are several environment-specific details in this path
+            # This path can reach out to other resource groups if necessary
+            subnet_id: /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/
+
+            # At least one IP address (public or private) needs to be assigned
+            # to the VM. It is also permissible to assign both.
+            private_ip: 10.0.0.3
+            public_ip: 1.2.3.4
+
+            # Optional
+            # cloud_name: AzureCloud          # default - AzureCloud
+            # storage_key: ((storage-key))    # only required if your client does not
+                                              # have the needed storage permissions
+            # container: opsmanagerimage      # storage account container name
+                                              # default - opsmanagerimage
+            # network_security_group: ops-manager-security-group
+            # vm_name: ops-manager-vm         # default - ops-manager-vm
+            # boot_disk_size: 200             # default - 200 (GB)
+            # use_managed_disk: true          # this flag is only respected by the
+                                              # create-vm and upgrade-opsman commands.
+                                              # set to false if you want to create
+                                              # the new opsman VM with an unmanaged
+                                              # disk (not recommended). default - true
+            # storage_sku: Premium_LRS        # this sets the SKU of the storage account
+                                              # for the disk
+                                              # Allowed values: Standard_LRS, Premium_LRS,
+                                              # StandardSSD_LRS, UltraSSD_LRS
+            # vm_size: Standard_DS1_v2        # the size of the Ops Manager VM
+                                              # default - Standard_DS2_v2
+                                              # Allowed values: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes-general
+            # tags: Project=ECommerce         # Space-separated tags: key[=value] [key[=value] ...]. Use '' to
+                                              # clear existing tags.
+            # vpc_subnet: /subscriptions/...  # DEPRECATED - use subnet_id
+            # use_unmanaged_disk: false       # DEPRECATED - use use_managed_disk
+
+          # Optional Ops Manager UI Settings for upgrade-opsman
+          # ssl-certificate: ...
+          # pivotal-network-settings: ...
+          # banner-settings: ...
+          # syslog-settings: ...
+          # rbac-settings: ...
+      
+    
+
+    **GCP**
+
+      
+        ---
+        opsman-configuration:
+          gcp:
+            # Either gcp_service_account_name or gcp_service_account json is required
+            # You must remove whichever you don't use
+            gcp_service_account_name: user@project-id.iam.gserviceaccount.com
+            gcp_service_account: ((gcp-service-account-key-json))
+
+            project: project-id
+            region: us-central1
+            zone: us-central1-b
+            vpc_subnet: infrastructure-subnet
+
+            # At least one IP address (public or private) needs to be assigned to the
+            # VM. It is also permissible to assign both.
+            public_ip: 1.2.3.4
+            private_ip: 10.0.0.2
+
+            ssh_public_key: ssh-rsa some-public-key... # RECOMMENDED, but not required
+            tags: ops-manager                          # RECOMMENDED, but not required
+
+            # Optional
+            # vm_name: ops-manager-vm  # default - ops-manager-vm
+            # custom_cpu: 2            # default - 2
+            # custom_memory: 8         # default - 8
+            # boot_disk_size: 100      # default - 100
+            # scopes: ["my-scope"]
+            # hostname: custom.hostname # info: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+
+          # Optional Ops Manager UI Settings for upgrade-opsman
+          # ssl-certificate: ...
+          # pivotal-network-settings: ...
+          # banner-settings: ...
+          # syslog-settings: ...
+          # rbac-settings: ...
+      
+    
+
+    **OpenStack**
+
+      
+      ---
+      opsman-configuration:
+        openstack:
+          project_name: project
+          auth_url: http://os.example.com:5000/v2.0
+          username: ((opsman-openstack-username))
+          password: ((opsman-openstack-password))
+          net_id: 26a13112-b6c2-11e8-96f8-529269fb1459
+          security_group_name: opsman-sec-group
+          key_pair_name: opsman-keypair
+
+          # At least one IP address (public or private) needs to be assigned to the VM.
+          public_ip: 1.2.3.4 # must be an already allocated floating IP
+          private_ip: 10.0.0.3
+
+          # Optional
+          # availability_zone: zone-01
+          # project_domain_name: default
+          # user_domain_name: default
+          # vm_name: ops-manager-vm       # default - ops-manager-vm
+          # flavor: m1.xlarge             # default - m1.xlarge
+          # identity_api_version: 2       # default - 3
+          # insecure: true                # default - false
+
+        # Optional Ops Manager UI Settings for upgrade-opsman
+        # ssl-certificate: ...
+        # pivotal-network-settings: ...
+        # banner-settings: ...
+        # syslog-settings: ...
+        # rbac-settings: ...
+      
+    
+
+    **vSphere**
+
+      
+        ---
+        opsman-configuration:
+          vsphere:
+            vcenter:
+              ca_cert: cert                 # REQUIRED if insecure = 0 (secure)
+              datacenter: example-dc
+              datastore: example-ds-1
+              folder: /example-dc/vm/Folder # RECOMMENDED, but not required
+              url: vcenter.example.com
+              username: ((vcenter-username))
+              password: ((vcenter-password))
+              resource_pool: /example-dc/host/example-cluster/Resources/example-pool
+              # resource_pool can use a cluster - /example-dc/host/example-cluster
+
+              # Optional
+              # host: host      # DEPRECATED - Platform Automation cannot guarantee
+                                # the location of the VM, given the nature of vSphere
+              # insecure: 0     # default - 0 (secure) | 1 (insecure)
+
+            disk_type: thin     # thin|thick
+            dns: 8.8.8.8
+            gateway: 192.168.10.1
+            hostname: ops-manager.example.com
+            netmask: 255.255.255.192
+            network: example-virtual-network
+            ntp: ntp.ubuntu.com
+            private_ip: 10.0.0.10
+            ssh_public_key: ssh-rsa ......   # REQUIRED Ops Manager >= 2.6
+
+            # Optional
+            # cpu: 1                         # default - 1
+            # memory: 8                      # default - 8 (GB)
+            # ssh_password: ((ssh-password)) # REQUIRED if ssh_public_key not defined
+                                            # (Ops Manager < 2.6 ONLY)
+            # vm_name: ops-manager-vm        # default - ops-manager-vm
+            # disk_size: 200                 # default - 160 (GB), only larger values allowed
+
+          # Optional Ops Manager UI Settings for upgrade-opsman
+          # ssl-certificate: ...
+          # pivotal-network-settings: ...
+          # banner-settings: ...
+          # syslog-settings: ...
+          # rbac-settings: ...
+      
+    
+ +1. Option 2: Alternatively, you can auto-generate your opsman.yml +using a `p-automator` command to output an opsman.yml file +in the directory it is called from. + + **AWS** + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + p-automator export-opsman-config \ + --state-file generated-state/state.yml \ + --config-file opsman.yml \ + --aws-region "$AWS_REGION" \ + --aws-secret-access-key "$AWS_SECRET_ACCESS_KEY" \ + --aws-access-key-id "$AWS_ACCESS_KEY_ID" + ``` + + **Azure** + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + p-automator export-opsman-config \ + --state-file generated-state/state.yml \ + --config-file opsman.yml \ + --azure-subscription-id "$AZURE_SUBSCRIPTION_ID" \ + --azure-tenant-id "$AZURE_TENANT_ID" \ + --azure-client-id "$AZURE_CLIENT_ID" \ + --azure-client-secret "$AZURE_CLIENT_SECRET" \ + --azure-resource-group "$AZURE_RESOURCE_GROUP" + ``` + + **GCP** + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + p-automator export-opsman-config \ + --state-file generated-state/state.yml \ + --config-file opsman.yml \ + --gcp-zone "$GCP_ZONE" \ + --gcp-service-account-json <(echo "$GCP_SERVICE_ACCOUNT_JSON") \ + --gcp-project-id "$GCP_PROJECT_ID" + ``` + + **vSphere** + + ```bash + docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ + p-automator export-opsman-config \ + --state-file generated-state/state.yml \ + --config-file opsman.yml \ + --vsphere-url "$VCENTER_URL" \ + --vsphere-username "$VCENTER_USERNAME" \ + --vsphere-password "$VCENTER_PASSWORD" + ``` + +1. Once you have your config file, commit and push it: + + ```bash + git add opsman.yml + git commit -m "Add opsman config" + git push + ``` + +2. Get the image for the new Tanzu Operations Manager version using the [`download-product`](../tasks.html#download-product) task. 
+It requires a config file to specify which Tanzu Operations Manager to get, +and to provide Tanzu Network credentials. +Name this file `download-opsman.yml`: + + ```yaml + --- + pivnet-api-token: ((pivnet-refresh-token)) # interpolated from CredHub + pivnet-file-glob: "ops-manager*.ova" + pivnet-product-slug: ops-manager + product-version-regex: ^2\.5\.0.*$ + ``` + + ```bash + git add download-opsman.yml + git commit -m "Add download opsman config" + git push + ``` + +1. Now put it all together using the following: + + ```yaml hl_lines="16-46" + - name: upgrade-opsman + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: env + - get: installation + - task: credhub-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/credhub-interpolate.yml + params: + CREDHUB_CLIENT: ((credhub-client)) + CREDHUB_SECRET: ((credhub-secret)) + CREDHUB_SERVER: ((credhub-server)) + PREFIX: /concourse/your-team-name/foundation + input_mapping: + files: env + output_mapping: + interpolated-files: interpolated-configs + - task: download-opsman-image + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-opsman.yml + input_mapping: + config: interpolated-configs + - task: upgrade-opsman + image: platform-automation-image + file: platform-automation-tasks/tasks/upgrade-opsman.yml + input_mapping: + config: interpolated-configs + image: downloaded-product + secrets: interpolated-configs + state: env + ``` + +

+    We do not explicitly set the default parameters
+    for `upgrade-opsman` in this example.
+    Because `opsman.yml` is the default input to `OPSMAN_CONFIG_FILE`,
+    `env.yml` is the default input to `ENV_FILE`,
+    and `state.yml` is the default input to `STATE_FILE`,
+    it is redundant to set these params in the pipeline.
+    See the task definitions
+    for the full range of
+    available and default parameters.

+ +2. Set the pipeline. + + Before running the job, + [`ensure`](https://concourse-ci.org/jobs.html#schema.step.ensure) that `state.yml` is always persisted + regardless of whether the `upgrade-opsman` job failed or passed. + Add the following section to the job: + + ```yaml hl_lines="49-68" + - name: upgrade-opsman + serial: true + plan: + - get: platform-automation-image + resource: platform-automation + params: + globs: ["*image*.tgz"] + unpack: true + - get: platform-automation-tasks + resource: platform-automation + params: + globs: ["*tasks*.zip"] + unpack: true + - get: env + - get: installation + - task: credhub-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/credhub-interpolate.yml + params: + CREDHUB_CLIENT: ((credhub-client)) + CREDHUB_SECRET: ((credhub-secret)) + CREDHUB_SERVER: ((credhub-server)) + PREFIX: /concourse/your-team-name/foundation + input_mapping: + files: env + output_mapping: + interpolated-files: interpolated-configs + - task: download-opsman-image + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-opsman.yml + input_mapping: + config: interpolated-configs + - task: upgrade-opsman + image: platform-automation-image + file: platform-automation-tasks/tasks/upgrade-opsman.yml + input_mapping: + config: interpolated-configs + image: downloaded-product + secrets: interpolated-configs + state: env + ensure: + do: + - task: make-commit + image: platform-automation-image + file: platform-automation-tasks/tasks/make-git-commit.yml + input_mapping: + repository: env + file-source: generated-state + output_mapping: + repository-commit: env-commit + params: + FILE_SOURCE_PATH: state.yml + FILE_DESTINATION_PATH: state.yml + GIT_AUTHOR_EMAIL: "ci-user@example.com" + GIT_AUTHOR_NAME: "CI User" + COMMIT_MESSAGE: 'Update state file' + - put: env + params: + repository: env-commit + merge: true + ``` + +3. 
Set the pipeline one final time, +run the job, and see it pass. + + ```bash + git add pipeline.yml + git commit -m "Upgrade Ops Manager in CI" + git push + ``` + +Your upgrade pipeline is now complete. diff --git a/docs/how-to-guides/upgrade-existing-opsman.md b/docs/how-to-guides/upgrade-existing-opsman.md deleted file mode 100644 index 6aa77101..00000000 --- a/docs/how-to-guides/upgrade-existing-opsman.md +++ /dev/null @@ -1,698 +0,0 @@ -# Writing a Pipeline to Upgrade an Existing Ops Manager - -This how-to-guide shows you how to create a pipeline for upgrading an existing Ops Manager VM. -If you don't have an Ops Manager VM, check out [Installing Ops Manager][install-how-to]. - -{% set upgradeHowTo = True %} -{% include ".getting-started.md" %} - -### Exporting The Installation - -We're finally in a position to do work! - -While ultimately we want to upgrade Ops Manager, -to do that safely we first need to download and persist -an export of the current installation. - -!!! warning "Export your installation routinely" - We _**strongly recommend**_ automatically exporting - the Ops Manager installation - and _**persisting it to your blobstore**_ on a regular basis. - This ensures that if you need to upgrade (or restore!) - your Ops Manager for any reason, - you'll have the latest installation info available. - Later in this tutorial, we'll be adding a time trigger - for exactly this reason. - -Let's switch out the test job -for one that exports our existing Ops Manager's installation state. -We can switch the task out by changing: - -- the `name` of the job -- the `name` of the task -- the `file` of the task - -[`export-installation`][export-installation] -has an additional required input. -We need the `env` file used to talk to Ops Manager. - -We'll write that file and make it available as a resource in a moment, -for now, we'll just `get` it as if it's there. - -It also has an additional output (the exported installation). 
-Again, for now, we'll just write that -like we have somewhere to `put` it. - -Finally, while it's fine for `test` to run in parallel, -`export-installation` shouldn't. -So, we'll add `serial: true` to the job, too. - -```yaml hl_lines="2 3 15-21" -jobs: -- name: export-installation - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: env - - task: export-installation - image: platform-automation-image - file: platform-automation-tasks/tasks/export-installation.yml - - put: installation - params: - file: installation/installation-*.zip -``` - -If we try to `fly` this up to Concourse, -it will again complain about resources that don't exist. - -So, let's make them. - -The first new resource we need is the env file. -We'll push our git repo to a remote on Github -to make this (and later, other) configuration available to the pipelines. - -Github has good [instructions][git-add-existing] -you can follow to create a new repository on Github. -You can skip over the part -about using `git init` to setup your repo, -since we [already did that](#but-first-git-init). - -Go ahead and setup your remote -and use `git push` to make what we have available. -We will use this repository to hold our single foundation specific configuration. -We are using the ["Single Repository for Each Foundation"][single-foundation-pattern] -pattern to structure our configurations. - -You will also need to add the repository URL -to `vars.yml` so we can reference it later, -when we declare the corresponding resource. - -```yaml -pipeline-repo: git@github.com:username/your-repo-name -``` - -Now lets write an `env.yml` for your Ops Manager. - -`env.yml` holds authentication and target information -for a particular Ops Manager. 
- -An example `env.yml` for username/password authentication -is shown below with the required properties. -Please reference [Configuring Env][generating-env-file] for the entire list of properties -that can be used with `env.yml` -as well as an example of an `env.yml` -that can be used with UAA (SAML, LDAP, etc.) authentication. - -The property `decryption-passphrase` is required for `import-installation`, -and therefore required for `upgrade-opsman`. - -If your foundation uses authentication other than basic auth, -please reference [Inputs and Outputs][env] -for more detail on UAA-based authentication. - - -```yaml -target: ((opsman-url)) -username: ((opsman-username)) -password: ((opsman-password)) -decryption-passphrase: ((opsman-decryption-passphrase)) -``` - -Add and commit the new `env.yml` file: - -```bash -git add env.yml -git commit -m "Add environment file for foundation" -git push -``` - -Now that the env file we need is in our git remote, -we need to add a resource to tell Concourse how to get it as `env`. - -Since this is (probably) a private repo, -we'll need to create a deploy key Concourse can use to access it. -Follow [Github's instructions][git-deploy-keys] -for creating a deploy key. 
- -Then, put the private key in Credhub so we can use it in our pipeline: - -```bash -# note the starting space - credhub set \ - --name /concourse/your-team-name/plat-auto-pipes-deploy-key \ - --type ssh \ - --private the/filepath/of/the/key-id_rsa \ - --public the/filepath/of/the/key-id_rsa.pub -``` - -Then, add this to the resources section of your pipeline file: - -```yaml -- name: env - type: git - source: - uri: ((pipeline-repo)) - private_key: ((plat-auto-pipes-deploy-key.private_key)) - branch: main -``` - -We'll put the credentials we need in Credhub: - -```bash -# note the starting space throughout - credhub set \ - -n /concourse/your-team-name/foundation/opsman-username \ - -t value -v your-opsman-username - credhub set \ - -n /concourse/your-team-name/foundation/opsman-password \ - -t value -v your-opsman-password - credhub set \ - -n /concourse/your-team-name/foundation/opsman-decryption-passphrase \ - -t value -v your-opsman-decryption-passphrase -``` - -{% include './.paths-and-pipeline-names.md' %} - -In order to perform interpolation in one of our input files, -we'll need the [`credhub-interpolate` task][credhub-interpolate] -Earlier, we relied on Concourse's native integration with Credhub for interpolation. -That worked because we needed to use the variable -in the pipeline itself, not in one of our inputs. 
- -We can add it to our job -after we've retrieved our `env` input, -but before the `export-installation` task: - -```yaml hl_lines="16-26" -jobs: -- name: export-installation - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: env - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - params: - CREDHUB_CLIENT: ((credhub-client)) - CREDHUB_SECRET: ((credhub-secret)) - CREDHUB_SERVER: https://your-credhub.example.com - PREFIX: /concourse/your-team-name/foundation - input_mapping: - files: env - output_mapping: - interpolated-files: interpolated-env - - task: export-installation - image: platform-automation-image - file: platform-automation-tasks/tasks/export-installation.yml - input_mapping: - env: interpolated-env - - put: installation - params: - file: installation/installation-*.zip -``` - -!!! info A bit on "output_mapping" -

The `credhub-interpolate` task for this job - maps the output from the task (`interpolated-files`) - to `interpolated-env`. -

This can be used by the next task in the job - to more explicitly define the inputs/outputs of each task. - It is also okay to leave the output as `interpolated-files` - if it is appropriately referenced in the next task - -Notice the [input mappings][concourse-input-mapping] -of the `credhub-interpolate` and `export-installation` tasks. -This allows us to use the output of one task -as in input of another. - -An alternative to `input_mappings` is discussed in -[Configuration Management Strategies][advanced-pipeline-design]. - -We now need to put our `credhub_client` and `credhub_secret` into Credhub, -so Concourse's native integration can retrieve them -and pass them as configuration to the `credhub-interpolate` task. - -```bash -# note the starting space throughout - credhub set \ - -n /concourse/your-team-name/credhub-client \ - -t value -v your-credhub-client - credhub set \ - -n /concourse/your-team-name/credhub-secret \ - -t value -v your-credhub-secret -``` - -Now, the `credhub-interpolate` task -will interpolate our config input, -and pass it to `export-installation` as `config`. - -The other new resource we need is a blobstore, -so we can persist the exported installation. - -We'll add an [S3 resource][s3-resource] -to the `resources` section: - -```yaml -- name: installation - type: s3 - source: - access_key_id: ((s3-access-key-id)) - secret_access_key: ((s3-secret-key)) - bucket: ((platform-automation-bucket)) - regexp: installation-(.*).zip -``` - -Again, we'll need to save the credentials in Credhub: - -```bash -# note the starting space throughout - credhub set \ - -n /concourse/your-team-name/s3-access-key-id \ - -t value -v your-bucket-s3-access-key-id - credhub set \ - -n /concourse/your-team-name/s3-secret-key \ - -t value -v your-s3-secret-key -``` - -This time (and in the future), -when we set the pipeline with `fly`, -we'll need to load vars from `vars.yml`. 
- -```bash -# note the space before the command - fly -t control-plane set-pipeline \ - -p foundation \ - -c pipeline.yml \ - -l vars.yml -``` - -Now you can manually trigger a build, and see it pass. - -!!! tip "Bash command history" -

You'll be using this, - the ultimate form of the `fly` command to set your pipeline, - for the rest of the tutorial. -

You can save yourself some typing by using your bash history - (if you did not prepend your command with a space). - You can cycle through previous commands with the up and down arrows. - Alternatively, - Ctrl-r will search your bash history. - Just hit Ctrl-r, type `fly`, - and it'll show you the last fly command you ran. - Run it with enter. - Instead of running it, - you can hit Ctrl-r again - to see the matching command before that. - -This is also a good commit point: - -```bash -git add pipeline.yml vars.yml -git commit -m "Export foundation installation in CI" -git push -``` - -### Performing The Upgrade - -Now that we have an exported installation, -we'll create another Concourse job to do the upgrade itself. -We want the export and the upgrade in separate jobs -so they can be triggered (and re-run) independently. - -We know this new job is going to center -on the [`upgrade-opsman`][upgrade-opsman] task. -Click through to the task description, -and write a new job that has `get` steps -for our platform-automation resources -and all the inputs we already know how to get: - -```yaml -- name: upgrade-opsman - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: env - - get: installation -``` - -We should be able to set this with `fly` and see it pass, -but it doesn't _do_ anything other than download the resources. -Still, we can make a commit here: - -```bash -git add pipeline.yml -git commit -m "Setup initial gets for upgrade job" -git push -``` - -!!! tip "Is this really a commit point though?" -

We like frequent, small commits that can be `fly` set and, - ideally, go green. -

This one doesn't actually do anything though, right? - Fair, but: setting and running the job - gives you feedback on your syntax and variable usage. - It can catch typos, resources you forgot to add or misnamed, etc. - Committing when you get to a working point helps keeps the diffs small, - and the history tractable. - Also, down the line, if you've got more than one pair working on a foundation, - the small commits help you keep off one another's toes. -

We don't demonstrate this workflow here, - but it can even be useful to make a commit, - use `fly` to see if it works, - and then push it if and only if it works. - If it doesn't, you can use `git commit --amend` - once you've figured out why and fixed it. - This workflow makes it easy to keep what is set on Concourse - and what is pushed to your source control remote in sync. - -Looking over the list of inputs for [`upgrade-opsman`][upgrade-opsman] -we still need three required inputs: - -1. `state` -1. `config` -1. `image` - -The optional inputs are vars used with the config, -so we'll get to those when we do `config`. - -Let's start with the [state file][state]. -We need to record the `iaas` we're on -and the ID of the _currently deployed_ Ops Manager VM. -Different IaaS uniquely identify VMs differently; -here are examples for what this file should look like, -depending on your IaaS: - -=== "AWS" - ``` yaml - --8<-- 'docs/examples/state/aws.yml' - ``` - -=== "Azure" - ``` yaml - --8<-- 'docs/examples/state/azure.yml' - ``` - -=== "GCP" - ``` yaml - --8<-- 'docs/examples/state/gcp.yml' - ``` - -=== "OpenStack" - ``` yaml - --8<-- 'docs/examples/state/openstack.yml' - ``` - -=== "vSphere" - ``` yaml - --8<-- 'docs/examples/state/vsphere.yml' - ``` - -Find what you need for your IaaS, -write it in your repo as `state.yml`, -commit it, and push it: - -```bash -git add state.yml -git commit -m "Add state file for foundation Ops Manager" -git push -``` - -We can map the `env` resource to [`upgrade-opsman`][upgrade-opsman]'s -`state` input once we add the task. - -But first, we've got two more inputs to arrange for. - -We'll write an [Ops Manager VM Configuration file][opsman-config] -to `opsman.yml`. -The properties available vary by IaaS; -regardless, you can often inspect your existing Ops Manager -in your IaaS's console -(or, if your Ops Manager was created with Terraform, -look at your terraform outputs) -to find the necessary values. 
- -=== "AWS" - ---excerpt--- "examples/aws-configuration" -=== "Azure" - ---excerpt--- "examples/azure-configuration" -=== "GCP" - ---excerpt--- "examples/gcp-configuration" -=== "Openstack" - ---excerpt--- "examples/openstack-configuration" -=== "vSphere" - ---excerpt--- "examples/vsphere-configuration" - -Alternatively, you can auto-generate your opsman.yml -using a `p-automator` command to output an opsman.yml file -in the directory it is called from. - -=== "AWS" - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator export-opsman-config \ - --state-file generated-state/state.yml \ - --config-file opsman.yml \ - --aws-region "$AWS_REGION" \ - --aws-secret-access-key "$AWS_SECRET_ACCESS_KEY" \ - --aws-access-key-id "$AWS_ACCESS_KEY_ID" - ``` - -=== "Azure" - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator export-opsman-config \ - --state-file generated-state/state.yml \ - --config-file opsman.yml \ - --azure-subscription-id "$AZURE_SUBSCRIPTION_ID" \ - --azure-tenant-id "$AZURE_TENANT_ID" \ - --azure-client-id "$AZURE_CLIENT_ID" \ - --azure-client-secret "$AZURE_CLIENT_SECRET" \ - --azure-resource-group "$AZURE_RESOURCE_GROUP" - ``` - -=== "GCP" - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator export-opsman-config \ - --state-file generated-state/state.yml \ - --config-file opsman.yml \ - --gcp-zone "$GCP_ZONE" \ - --gcp-service-account-json <(echo "$GCP_SERVICE_ACCOUNT_JSON") \ - --gcp-project-id "$GCP_PROJECT_ID" - ``` - -=== "vSphere" - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator export-opsman-config \ - --state-file generated-state/state.yml \ - --config-file opsman.yml \ - --vsphere-url "$VCENTER_URL" \ - --vsphere-username "$VCENTER_USERNAME" \ - --vsphere-password "$VCENTER_PASSWORD" - ``` - -Once you have your config file, commit 
and push it: - -```bash -git add opsman.yml -git commit -m "Add opsman config" -git push -``` - -Finally, we need the image for the new Ops Manager version. - -We'll use the [`download-product`][download-product] task. -It requires a config file to specify which Ops Manager to get, -and to provide Tanzu Network credentials. -Name this file `download-opsman.yml`: - -```yaml ---- -pivnet-api-token: ((pivnet-refresh-token)) # interpolated from Credhub -pivnet-file-glob: "ops-manager*.ova" -pivnet-product-slug: ops-manager -product-version-regex: ^2\.5\.0.*$ -``` - -You know the drill. - -```bash -git add download-opsman.yml -git commit -m "Add download opsman config" -git push -``` - -Now, we can put it all together: - -```yaml hl_lines="16-46" -- name: upgrade-opsman - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: env - - get: installation - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - params: - CREDHUB_CLIENT: ((credhub-client)) - CREDHUB_SECRET: ((credhub-secret)) - CREDHUB_SERVER: ((credhub-server)) - PREFIX: /concourse/your-team-name/foundation - input_mapping: - files: env - output_mapping: - interpolated-files: interpolated-configs - - task: download-opsman-image - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-opsman.yml - input_mapping: - config: interpolated-configs - - task: upgrade-opsman - image: platform-automation-image - file: platform-automation-tasks/tasks/upgrade-opsman.yml - input_mapping: - config: interpolated-configs - image: downloaded-product - secrets: interpolated-configs - state: env -``` - -!!! 
note "Defaults for tasks" - We do not explicitly set the default parameters - for `upgrade-opsman` in this example. - Because `opsman.yml` is the default input to `OPSMAN_CONFIG_FILE`, - `env.yml` is the default input to `ENV_FILE`, - and `state.yml` is the default input to `STATE_FILE`, - it is redundant to set this param in the pipeline. - Refer to the [task definitions][task-reference] for a full range of the - available and default parameters. - -Set the pipeline. - -Before we run the job, -we should [`ensure`][ensure] that `state.yml` is always persisted -regardless of whether the `upgrade-opsman` job failed or passed. -To do this, we can add the following section to the job: - -```yaml hl_lines="49-68" -- name: upgrade-opsman - serial: true - plan: - - get: platform-automation-image - resource: platform-automation - params: - globs: ["*image*.tgz"] - unpack: true - - get: platform-automation-tasks - resource: platform-automation - params: - globs: ["*tasks*.zip"] - unpack: true - - get: env - - get: installation - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - params: - CREDHUB_CLIENT: ((credhub-client)) - CREDHUB_SECRET: ((credhub-secret)) - CREDHUB_SERVER: ((credhub-server)) - PREFIX: /concourse/your-team-name/foundation - input_mapping: - files: env - output_mapping: - interpolated-files: interpolated-configs - - task: download-opsman-image - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-opsman.yml - input_mapping: - config: interpolated-configs - - task: upgrade-opsman - image: platform-automation-image - file: platform-automation-tasks/tasks/upgrade-opsman.yml - input_mapping: - config: interpolated-configs - image: downloaded-product - secrets: interpolated-configs - state: env - ensure: - do: - - task: make-commit - image: platform-automation-image - file: 
platform-automation-tasks/tasks/make-git-commit.yml - input_mapping: - repository: env - file-source: generated-state - output_mapping: - repository-commit: env-commit - params: - FILE_SOURCE_PATH: state.yml - FILE_DESTINATION_PATH: state.yml - GIT_AUTHOR_EMAIL: "ci-user@example.com" - GIT_AUTHOR_NAME: "CI User" - COMMIT_MESSAGE: 'Update state file' - - put: env - params: - repository: env-commit - merge: true -``` - -Set the pipeline one final time, -run the job, and see it pass. - -```bash -git add pipeline.yml -git commit -m "Upgrade Ops Manager in CI" -git push -``` - -Your upgrade pipeline is now complete. -You are now free to move on to the next steps of your automation journey. - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/img/upgrade-flowchart.png b/docs/img/upgrade-flowchart.png new file mode 100644 index 00000000..99d45ac2 Binary files /dev/null and b/docs/img/upgrade-flowchart.png differ diff --git a/docs/img/variables-interpolate-flowchart-independent.png b/docs/img/variables-interpolate-flowchart-independent.png new file mode 100644 index 00000000..a412112b Binary files /dev/null and b/docs/img/variables-interpolate-flowchart-independent.png differ diff --git a/docs/img/variables-interpolate-flowchart-mixed.png b/docs/img/variables-interpolate-flowchart-mixed.png new file mode 100644 index 00000000..28f49d59 Binary files /dev/null and b/docs/img/variables-interpolate-flowchart-mixed.png differ diff --git a/docs/index.html.md.erb b/docs/index.html.md.erb new file mode 100644 index 00000000..59deccd0 --- /dev/null +++ b/docs/index.html.md.erb @@ -0,0 +1,157 @@ +# Overview + +Platform Automation Toolkit provides building blocks +to create repeatable and reusable automated pipeline(s) +for upgrading and installing foundations. +VMware also provides instructions for using these building blocks in various workflows. 
+This introduction provides a high-level overview of Platform Automation Toolkit. +To dive deeper, see the references section. + +See the [Getting Started](./getting-started.html) section for instructions +on how to start using Platform Automation Toolkit. + +## About Platform Automation Toolkit + +* Uses the [GitHub om repo](https://github.com/pivotal-cf/om), + (and by extension, the Tanzu Operations Manager API) + to enable command-line interaction with Tanzu Operations Manager + ([Understanding the Tanzu Operations Manager Interface](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/pcf-interface.html)) + + + +* Includes a documented reference pipeline + showing one possible configuration for using tasks. + When automating your platform, + there are some manual steps you'll need to take to optimize for automation. + These steps will be emphasized to ensure that they are clear to you. + +* Comes bundled with [Concourse tasks](https://concourse-ci.org/tasks.html) + that demonstrate how to use tasks + in a containerized Continuous Integration (CI) system.
 Platform Automation Toolkit tasks are: + + * Legible: They use + human-readable YAML config files that can be edited and managed. + + * Modular: Each task has defined inputs and outputs + that perform granular actions. + + * Built for Automation: Tasks are idempotent, + so re-running them in a CI won't break builds. + + * Not Comprehensive: Workflows that use Platform Automation Toolkit + may also contain `om` commands, custom tasks, + and even interactions with the Tanzu Operations Manager user interface. + Platform Automation Toolkit is a set of tools to use alongside other tools, + rather than being a comprehensive solution on its own. + +* Can be used with a documented and supported deployment of Concourse CI. + The [Concourse for Platform Automation docs](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/concourse-for-tanzu/7-0/tanzu-concourse/installation-install-platform-automation.html) provide a step-by-step tutorial for how to get started. + This approach to deploying Concourse uses the BOSH Director deployed by Tanzu Operations Manager to deploy and maintain Concourse, CredHub, and User Account and Authentication (UAA). + + + +The [Task Reference](./tasks.html) topic discusses the example tasks. + +

 
+Platform Automation Toolkit takes a different approach than PCF Pipelines does. +For instance, Platform Automation Toolkit allows you +to perform installs and upgrades in the same pipeline. +We recommend trying out Platform Automation Toolkit +to get a sense of the features and how they differ, +so that you can determine the best transition method for your environment and needs.

+ +## Platform Automation Toolkit and Upgrading Tanzu Operations Manager + +Successful platform engineering teams know that a platform team +that's always up to date is critical for their business. +If they don’t stay up to date, +they miss out on the latest platform features and the services that VMware delivers, +which means their development teams miss out, too. +By not keeping up to date, +platforms could encounter security risks or even application failures. + +VMware offers regular updates for Tanzu Operations Manager and the products it installs, +which ensures that our customers have access to the latest security patches and new features. +For example, VMware releases security patches every six days on average. + +So how can a platform engineering team simplify the platform upgrade process? + +### Small and Continuous Upgrades + +Adopting the practice of small and constant platform updates +is one of the best ways to simplify the platform upgrade process. +This behavior can significantly reduce risk, +increase stability with faster troubleshooting, +and reduce the overall effort of upgrading. +This also creates a culture of continuous iteration +and improves feedback loops with the platform teams and the developers, +building trust across the organization. +A good place to start is to consume every patch. + +How do we do this? + +### Small and Continuous Upgrades With Platform Automation Toolkit + +With Platform Automation Toolkit, +platform teams have the tools to create an automated perpetual upgrade machine, +which can continuously take the latest updates when new software is available, +including Tanzu Platform for Cloud Foundry, VMware Tanzu PKS, Tanzu Operations Manager, stemcells, products, and services. 
+In addition, Platform Automation Toolkit allows you to: + +* manage multiple foundations and reduce configuration drift + by tracking changes through source control with + externalized configuration + +* create pipelines that handle installs and upgrades to streamline workflows + +

+Tanzu Application Service is now called Tanzu Platform for Cloud Foundry. +The current version of Tanzu Platform for Cloud Foundry is 10.0.

+ +## Platform Automation Toolkit and Tanzu Operations Manager + +The following table compares how Tanzu Operations Manager +and Platform Automation Toolkit might run a typical sequence of Tanzu Operations Manager operations: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tanzu Operations ManagerPlatform Automation Toolkit
When to useFirst install and minor upgradesConfig changes and patch upgrades
1. Create Tanzu Operations Manager VMManually prepare IaaS and create Tanzu Operations Manager VMcreate-vm
2. Configure who can run opsManually configure internal UAA or external identity providerconfigure-authentication or configure-saml-authentication
3. Configure BOSHManually configure BOSH Directorconfigure-director with settings saved from BOSH Director with same version
4. Add productsClick Import a Product to upload file, then + to add tile to Installation Dashboardupload-and-stage-product
5. Configure productsManually configure productsconfigure-product with settings saved from tiles with same version
6. Deploy ProductsClick Apply Changesapply-changes
7. UpgradeManually export existing Tanzu Operations Manager settings, power off the VM, then create a new, updated + Tanzu Operations Manager VMexport-installation then upgrade-opsman
diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index ff107f67..00000000 --- a/docs/index.md +++ /dev/null @@ -1,149 +0,0 @@ -Platform Automation Toolkit provides building blocks -to create repeatable and reusable automated pipeline(s) -for upgrading and installing foundations. -We also provide instructions on using these building blocks in various workflows. -In this introduction, we'll provide a high-level overview of Platform Automation Toolkit. -To dive-deeper, check out the references section. - -See the [Getting Started][getting-started] section for instructions -on how to start using Platform Automation Toolkit. - -## About - -* Uses [om][om], - (and by extension, the Ops Manager API) - to enable command-line interaction with Ops Manager - ([Understanding the Ops Manager Interface][platform-understanding-opsman]) - -* Includes a documented reference pipeline - showing one possible configuration to use tasks. - When automating your platform, - there are some manual steps you'll need to take to optimize for automation. - We will call these steps out so that these are clear to you. - -* Comes bundled with Concourse [tasks][concourse-task-definition] - that demonstrate how to use these tasks - in a containerized Continuous Integration (CI) system. - Platform Automation Toolkit tasks are: - - * Legible: They use - human-readable YAML config files which can be edited and managed - - * Modular: Each task has defined inputs and outputs - that perform granular actions - - * Built for Automation: Tasks are idempotent, - so re-running them in a CI won't break builds - - * Not Comprehensive: Workflows that use Platform Automation Toolkit - may also contain `om` commands, custom tasks, - and even interactions with the Ops Manager user interface. - Platform Automation Toolkit is a set of tools to use alongside other tools, - rather than a comprehensive solution. 
- -* A documented and supported deployment of Concourse CI to use with Platform Automation Toolkit. - The [Concourse for Platform Automation docs][concourse-for-pa] provide a step-by-step tutorial for how to get started. - This approach to deploying Concourse uses the BOSH Director deployed by Ops Manager to deploy and maintain Concourse, Credhub, and UAA. - -The [Task Reference][task-reference] topic discusses these example tasks further. - -!!! info "Transitioning from PCF Pipelines" - Platform Automation Toolkit takes a different approach than PCF Pipelines. - For instance, Platform Automation Toolkit allows you - to perform installs and upgrades in the same pipeline. - We recommend trying out Platform Automation Toolkit - to get a sense of the features and how they differ - to understand the best transition method for your environment and needs. - -## Platform Automation Toolkit and Upgrading Ops Manager - -Successful platform engineering teams know that a platform team -that's always up to date is critical for their business. -If they don’t stay up to date, -they miss out on the latest platform features and the services that VMware delivers, -which means their development teams miss out too. -By not keeping up to date, -platforms could encounter security risks or even application failures. - -VMware offers regular updates for Ops Manager and the products it installs, -which ensures our customers have access to the latest security patches and new features. -For example, VMware releases security patches every six days on average. - -So how can a platform engineering team simplify the platform upgrade process? - -#### Small and Continuous Upgrades - -Adopting the practice of small and constant platform updates -is one of the best ways to simplify the platform upgrade process. -This behavior can significantly reduce risk, -increase stability with faster troubleshooting, -and overall reduce the effort of upgrading. 
-This also creates a culture of continuous iteration -and improves feedback loops with the platform teams and the developers, -building trust across the organization. -A good place to start is to consume every patch. - -How do we do this? - -#### Small and Continuous Upgrades With Platform Automation Toolkit - -With Platform Automation Toolkit, -platform teams have the tools to create an automated perpetual upgrade machine, -which can continuously take the latest updates when new software is available - -including Tanzu Application Service, VMware Tanzu PKS, Ops Manager, stemcells, products, and services. -In addition, Platform Automation Toolkit allows you to: - -* manage multiple foundations and reduce configuration drift - by tracking changes through source control with - externalized configuration - -* create pipelines that handle installs and upgrades to streamline workflows. - -## Platform Automation Toolkit and Ops Manager - -The following table compares how Ops Manager -and Platform Automation Toolkit might run a typical sequence of Ops Manager operations: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Ops ManagerPlatform Automation Toolkit
When to UseFirst install and minor upgradesConfig changes and patch upgrades
1. Create Ops Manager VMManually prepare IaaS and create Ops Manager VMcreate-vm
2. Configure Who Can Run OpsManually configure internal UAA or external identity providerconfigure-authentication or configure-saml-authentication
3. Configure BOSHManually configure BOSH Directorconfigure-director with settings saved from BOSH Director with same version
4. Add ProductsClick Import a Product to upload file, then + to add tile to Installation Dashboardupload-and-stage-product
5. Configure ProductsManually configure productsconfigure-product with settings saved from tiles with same version
6. Deploy ProductsClick Apply Changesapply-changes
7. UpgradeManually export existing Ops Manager settings, power off the VM, then create a new, updated - Ops Manager VMexport-installation then upgrade-opsman
- -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} diff --git a/docs/inputs-outputs.html.md.erb b/docs/inputs-outputs.html.md.erb new file mode 100644 index 00000000..3d346164 --- /dev/null +++ b/docs/inputs-outputs.html.md.erb @@ -0,0 +1,587 @@ +# Task inputs and outputs + + +This topic describes the inputs that can be provided to the tasks, and their outputs. +Each task can only take a specific set of inputs, indicated under the `inputs` property of the YAML. + +

+To get the slug needed for many of the procedures on this page, go to +My Dashboard +on the Broadcom Support Portal. +This site requires you to log in. +

+ +## director config + +The config director sets the BOSH tile (director) on Tanzu Operations Manager. + +The `config` input for a director task expects to have a `director.yml` file. +The configuration of the `director.yml` is IAAS specific for some properties; that is, networking. + +There are two ways to build a director config. + +1. Using an already deployed Tanzu Operations Manager, you can extract the config using [staged-director-config](./tasks.html#staged-director-config). +2. Deploying a new Tanzu Operations Manager requires more effort for a `director.yml`. + The configuration of director is variables, based on the features enabled. + This `director.yml` is a very basic example for vSphere. + + ```yaml + --- + az-configuration: + - clusters: + - cluster: cluster-name + resource_pool: resource-pool-name + name: AZ01 + + properties-configuration: + iaas_configuration: + vcenter_host: vcenter.example.com + vcenter_username: admin + vcenter_password: password + ...... + director_configuration: + blobstore_type: local + bosh_recreate_on_next_deploy: false + custom_ssh_banner: null + ...... + security_configuration: + generate_vm_passwords: true + trusted_certificates: + syslog_configuration: + enabled: false + + network-assignment: + network: + name: INFRASTRUCTURE + other_availability_zones: [] + singleton_availability_zone: + name: AZ01 + + networks-configuration: + icmp_checks_enabled: false + networks: + - name: NETWORK-NAME + ...... + + resource-configuration: + compilation: + instance_type: + id: automatic + instances: automatic + ...... + ``` + +The IAAS-specific configuration can be found in the Tanzu Operations Manager API documentation. + +What follows is a list of properties that can be set in the `director.yml` +and a link to the API documentation explaining any IAAS specific properties. 
+ +* `az-configuration` - a list of [availability zones](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0//api/v0/staged/director/availability_zones/get/) +* `networks-configuration` - a list of [named networks](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0//api/v0/staged/director/networks/get/) +* `properties-configuration` - [BOSH Director configuration and properties](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0//api/v0/staged/director/properties/get/) + * `director_configuration` - BOSH Director properties + * `security_configuration` - BOSH Director security properties + * `syslog_configuration` - configure the syslog sinks for the BOSH Director +* `resource-configuration` - [IAAS VM flavor](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0//api/v0/staged/products/product_guid/resources/get/) for the BOSH Director +* `vmextensions-configuration` - create/update/delete [VM extensions](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0/vm-extensions/) + +### GCP Shared VPC + +Support for Shared VPC is done by configuring the `iaas_identifier` path for the [infrastructure subnet](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/gcp-prepare-env-manual.html#create_network), +which includes the host project ID, region of the subnet, and the subnet name. + +For example: + +`[HOST_PROJECT_ID]/[NETWORK]/[SUBNET]/[REGION]` + +## download-product-config + +The `config` input for a download product task +can be used with a `download-config.yml` file to download a tile. 
+Here are examples of the configuration of the `download-config.yml`: + +**Broadcom Support Portal** (formerly **Tanzu Network**) + +```yaml +--- +pivnet-api-token: token +pivnet-file-glob: "*.pivotal" # must be quoted if starting with a * +pivnet-product-slug: product-slug + +# Either product-version OR product-version-regex is required +# product-version-regex: ^1\.2\..*$ # must not be quoted +product-version: 1.2.3 + +# Optional +# pivnet-disable-ssl: true # default - false +# stemcell-iaas: aws # aws|azure|google|openstack|vsphere + # will attempt to download the latest stemcell + # associated with a product by default +# stemcell-version: 90.90 # specific stemcell version to download +# stemcell-heavy: true # will force download of heavy stemcell + # not available on all IaaSes +# blobstore-bucket: bucket # if set, product files will have their slug and + # version prepended. Set if the product will + # ever be stored in a blobstore +``` + +**S3** + +```yaml +--- +pivnet-file-glob: "*.pivotal" # must be quoted if starting with a * +pivnet-product-slug: product-slug +blobstore-bucket: bucket-name +s3-region-name: us-west-1 # if NOT using AWS s3, value is 'region' + +# Required unless `s3-auth-type: iam` +s3-access-key-id: aws-or-minio-key-id +s3-secret-access-key: aws-or-minio-secret-key + +# Optional +# blobstore-product-path: /path/to/product # default - root path of bucket +# blobstore-stemcell-path: /path/to/stemcell # default - root path of bucket +# s3-disable-ssl: true # default - false +# s3-enable-v2-signing: true # available for compatibility +# s3-auth-type: iam # default - accesskey +# s3-endpoint: s3.endpoint.com # required if NOT using AWS S3 +``` + +**GCS** + +```yaml +--- +pivnet-file-glob: "*.pivotal" # must be quoted if starting with a * +pivnet-product-slug: product-slug +blobstore-bucket: bucket-name +gcs-project-id: project-id +gcs-service-account-json: | + { + "type": "service_account", + "project_id": "project-id", + "private_key_id": 
"fake-key-id", + "private_key": "-----BEGIN PRIVATE KEY-----\fake-key-----END PRIVATE KEY-----\n", + "client_email": "email@project-id.iam.gserviceaccount.com", + "client_id": "123456789876543212345", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/project%40project-id.iam.gserviceaccount.com" + } + +# Optional +# blobstore-product-path: /path/to/product # default - root path of bucket +# blobstore-stemcell-path: /path/to/stemcell # default - root path of bucket +``` + +**Azure** + +```yaml +--- +pivnet-file-glob: "*.pivotal" # must be quoted if starting with a * +pivnet-product-slug: product-slug +blobstore-bucket: container-name +azure-storage-account: 1234567890abcdefghij +azure-storage-key: storage-access-key-from-azure-portal + +# Optional +# blobstore-product-path: /path/to/product # default - root path of bucket +# blobstore-stemcell-path: /path/to/stemcell # default - root path of bucket +``` + +## download-stemcell-product-config + +The `config` input for a download product task +can be used with a `download-config.yml` file to download a stemcell. +The configuration of the `download-config.yml` looks like this: + +<%= partial "examples/download-stemcell-product" %> + +## env + +The `env` input for a task expects to have a `env.yml` file. +This file contains properties for targeting and logging into the Tanzu Operations Manager API. + +**basic auth** + +<%= partial "../examples/env" %> + +**uaa auth** + +<%= partial "../examples/env-uaa" %> + +### Getting the `client-id` and `client-secret` + +Tanzu Operations Manager, by preference, uses Client ID and Client Secret, if these are provided. +To create a Client ID and Client Secret: + +1. Add `uaac target https://YOUR_OPSMANAGER/uaa`. +1. If you are using SAML, `uaac token sso get`. 
+1. If you are using basic auth, add `uaac token owner get`. +1. Specify the Client ID as `opsman` and leave Client Secret blank. +1. Generate a client ID and secret. + +```bash +uaac client add -i +Client ID: NEW_CLIENT_NAME +New client secret: DESIRED_PASSWORD +Verify new client secret: DESIRED_PASSWORD +scope (list): opsman.admin +authorized grant types (list): client_credentials +authorities (list): opsman.admin +access token validity (seconds): 43200 +refresh token validity (seconds): 43200 +redirect uri (list): +autoapprove (list): +signup redirect url (url): +``` + +## errand config + +The `ERRAND_CONFIG_FILE` input is used in the [`apply-changes`](./tasks.html#apply-changes) task. +This file contains properties for enabling and disabling errands +for a particular run of `apply-changes`. + +To retrieve the default configuration of your product's errands, +you can use [`staged-config`](./tasks.html#staged-config). + +The expected format for this errand config is: + + ```yaml + errands: + sample-product-1: + run_post_deploy: + smoke_tests: default + push-app: false + run_pre_delete: + smoke_tests: true + sample-product-2: + run_post_deploy: + smoke_tests: default + ``` + +## installation + +The file contains the information to restore a Tanzu Operations Manager VM. +The `installation` input for a opsman VM task expects to have a `installation.zip` file. + +This file can be exported from a Tanzu Operations Manager VM using the [export-installation](./tasks.html#export-installation) task. +This file can be imported to a Tanzu Operations Manager VM using the [import-installation](./tasks.html#import-installation) task. + +

+This file cannot be manually created. It is a file that must be generated using the export function of Tanzu Operations Manager.

+ +## Tanzu Operations Manager config + +The config for a Tanzu Operations Manager described IAAS specific information for creating the VM; that is, VM flavor (size) and IP addresses. + +The `config` input for opsman task expects to have a `opsman.yml` file. +The configuration of the `opsman.yml` is IAAS specific. + +

AWS

+ +<%= partial "./examples/opsman-config/aws1" %> + +

Azure

+ +<%= partial "./examples/opsman-config/azure1" %> + +

GCP

+ +<%= partial "./examples/opsman-config/gcp1" %> + +

Openstack

+ +<%= partial "./examples/opsman-config/openstack1" %> + +

vSphere

+ +<%= partial "./examples/opsman-config/vsphere1" %> + +

Additional settings

+ +<%= partial "./examples/opsman-config/settings" %> + +Specific advice and features for the different IaaSs are documented below. + +### AWS + +These required properties are adapted from the instructions outlined in +[Requirements and prerequisites for Tanzu Operations Manager on AWS](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-aws.html). + +<%= partial "ip-addresses" %> + +

+For authentication, you must either set use_instance_profile: true +or provide a secret_key_id and secret_access_key. +You must remove key information if you're using an instance profile. +Using an instance profile allows you to avoid interpolation +because this file then contains no secrets.

+ +### Azure + +The required properties are adapted from the instructions outlined in +[Requirements and prerequisites for Tanzu Operations Manager on Azure](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-azure.html). + +<%= partial "ip-addresses" %> + +### GCP + +The required properties are adapted from the instructions outlined in +[Requirements and prerequisites for Tanzu Operations Manager on GCP](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-gcp.html). + +<%= partial "ip-addresses" %> + +

+For authentication either gcp_service_account or gcp_service_account_name is required. +You must remove the one you are not using. +Note that using gcp_service_account_name allows you to avoid interpolation, +because this file then contains no secrets.

+ +Support for Shared VPC is done using [configuring the `vpc_subnet` path](https://cloud.google.com/vpc/docs/provisioning-shared-vpc#creating_an_instance_in_a_shared_subnet) +to include the host project id, region of the subnet, and the subnet name. + +For example: + +`projects/[HOST_PROJECT_ID]/regions/[REGION]/subnetworks/[SUBNET]` + +### OpenStack + +The required properties are adapted from the instructions in +[Installing and configuring Tanzu Operations Manager on OpenStack](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/openstack-index.html) + +<%= partial "ip-addresses" %> + +### vSphere + +The required properties are adapted from the instructions in +[Installing and configuring Tanzu Operations Manager on vSphere](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/vsphere-index.html) + +## opsman image + +This file is an artifact from the [Broadcom Support portal](https://support.broadcom.com/group/ecx/productdownloads?subfamily=VMware%20Tanzu%20Operations%20Manager), +which contains the VM image for a specific IaaS. +For vSphere and OpenStack, it's a full disk image. +For AWS, GCP, and Azure, it's a YAML file that lists the location +of images that are already available on the IaaS. + +These are examples to download the image artifact for each IaaS +using the [download-product](./tasks.html#download-product) task. + +### opsman.yml + +<%= partial "how-to-guides/opsman-config-tabs" %> + +The `p-automator` CLI includes the ability to extract the Tanzu Operations Manager VM configuration (GCP, AWS, Azure, and VSphere). +This works for Tanzu Operations Managers that are already running. It is useful when [migrating to automation](./how-to-guides/upgrade-existing-opsman.html). + +Usage: + +1. 
Get the Platform Automation Toolkit image from the [Broadcom Support portal](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Platform%20Automation%20Toolkit).
+2. Import the image into `docker` to run the `p-automator` locally. See [Running commands locally](./how-to-guides/running-commands-locally.html).
+3. Create a [state file](./inputs-outputs.html#state) that represents your current VM and IAAS.
+4. Invoke the `p-automator` CLI to get the configuration.
+
+For example, on AWS with an access key and secret key:
+
+```bash
+docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \
+p-automator export-opsman-config \
+--state-file=state.yml \
+--aws-region=us-west-1 \
+--aws-secret-access-key some-secret-key \
+--aws-access-key-id some-access-key
+```
+
+The outputted `opsman.yml` contains the information needed for Platform Automation Toolkit to manage the Tanzu Operations Manager VM.
+
+### download-product task
+
+```yaml
+- task: download-opsman-image
+  image: platform-automation-image
+  file: platform-automation-tasks/tasks/download-product.yml
+  params:
+    CONFIG_FILE: opsman.yml
+```
+
+## product
+
+The `product` input requires a single tile file (`.pivotal`) as downloaded from the Broadcom Support portal.
+
+Here's an example of how to pull the Tanzu Platform for Cloud Foundry tile
+using the [download-product](./tasks.html#download-product) task.
+
+### product.yml
+
+```yaml
+---
+pivnet-api-token: token
+pivnet-file-glob: "cf-*.pivotal"
+pivnet-product-slug: elastic-runtime
+product-version-regex: ^2\.6\..*$
+```
+
+### download-product task
+
+```yaml
+- task: download-stemcell
+  image: platform-automation-image
+  file: platform-automation-tasks/tasks/download-product.yml
+  params:
+    CONFIG_FILE: product.yml
+```
+

+This file cannot be manually created. This file must be retrieved from the Broadcom Support portal.

+ +## product config + +There are two ways to build a product config. + +1. Using an already deployed product (tile), you can extract the config using [staged-config](./tasks.html#staged-config). +1. Use an example and fill in the values based on the meta information from the tile. +This `product.yml` is a very basic example for `healthwatch`. + +<%= partial "../examples/product" %> + +The following is a list of properties that can be set in the `product.yml` +and a link to the API documentation explaining the properties. + +* `product-properties` - [Tanzu Operations Manager tile properties](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0//api/v0/staged/director/properties/get/) + +* `network-properties` - a list of [Tanzu Operations Manager named networks](https://apigw-test.vmware.com/stg/v1/m12/api/TanzuOperationsManagerAPIDocumentation/3-0/opsman-api/#tag/Networks-and-AZs-assignment/paths/~1api~1v0~1staged~1products~1%7Bproduct_guid%7D~1networks_and_azs/get) + + +* `resource-config` - for the jobs of the [Tanzu Operations Manager tile](https://developer.broadcom.com/xapis/tanzu-operations-manager-api/3.0//api/v0/staged/products/product_guid/jobs/job_guid/resource_config/put/) + +## state + +This file contains the meta-information needed to manage the Tanzu Operations Manager VM. +The `state` input for a opsman VM task expects to have a `state.yml` file. + +The `state.yml` file contains two properties: + +1. `iaas` is the IAAS the Tanzu Operations Manager VM is hosted on. (`gcp`, `vsphere`, `aws`, `azure`, `openstack`) +2. `vm_id` is the VM unique identifier for the VM. For some IAAS, the VM ID is the VM name. 
+ + Different IaaS uniquely identify VMs differently; + here are examples for what this file should look like, + depending on your IAAS: + + **AWS** + + <%= partial "../examples/state/aws" %> + + **Azure** + + <%= partial "../examples/state/azure" %> + + **GCP** + + <%= partial "../examples/state/gcp" %> + + **OpenStack** + + <%= partial "../examples/state/openstack" %> + + **vSphere** + + <%= partial "../examples/state/vsphere" %> + +## stemcell + +This `stemcell` input requires the stemcell tarball (`.tgz`) as downloaded from the Broadcom Support portal. +It must be in the original filename as that is used by Tanzu Operations Manager to parse metadata. +The filename might look something like `bosh-stemcell-621.76-vsphere-esxi-ubuntu-xenial-go_agent.tgz`. + +

+This file cannot be manually created. This file must be retrieved from the Broadcom Support portal.

+ +Here's an example of how to pull the vSphere stemcell +using the [download-product](./tasks.html#download-product) task. + +### stemcell.yml + +**AWS** + +```yaml +--- +pivnet-api-token: token +pivnet-file-glob: "bosh-stemcell-*-aws*.tgz" +pivnet-product-slug: stemcells-ubuntu-xenial +product-version-regex: ^170\..*$ +``` + +**Azure** + +```yaml +--- +pivnet-api-token: token +pivnet-file-glob: "bosh-stemcell-*-azure*.tgz" +pivnet-product-slug: stemcells-ubuntu-xenial +product-version-regex: ^170\..*$ +``` + +**GCP** + +```yaml +--- +pivnet-api-token: token +pivnet-file-glob: "bosh-stemcell-*-google*.tgz" +pivnet-product-slug: stemcells-ubuntu-xenial +product-version-regex: ^170\..*$ +``` + +**OpenStack** + +```yaml +--- +pivnet-api-token: token +pivnet-file-glob: "bosh-stemcell-*-openstack*.tgz" +pivnet-product-slug: stemcells-ubuntu-xenial +product-version-regex: ^170\..*$ +``` + +**vSphere** + +```yaml +--- +pivnet-api-token: token +pivnet-file-glob: "bosh-stemcell-*-vsphere*.tgz" +pivnet-product-slug: stemcells-ubuntu-xenial +product-version-regex: ^170\..*$ +``` + + +### download-product task + +```yaml +- task: download-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: stemcell.yml +``` + +### assign-stemcell-task + +This artifact is an output of [`download-product`](./tasks.html#download-product) +located in the `assign-stemcell-config` output directory. + +This file should resemble the following: + +```yaml +product: cf +stemcell: "97.190" +``` + +## telemetry + +The `config` input for the [collect-telemetry](./tasks.html#collect-telemetry) task +can be used with a `telemetry.yml` file to collect data for VMware +so that VMware staff can learn and measure results +to help put customer experience at the forefront of their product decisions. 
+The configuration of the `telemetry.yml` looks like this: + +<%= partial "../examples/telemetry" %> diff --git a/docs/inputs-outputs.md b/docs/inputs-outputs.md deleted file mode 100644 index 6335b34a..00000000 --- a/docs/inputs-outputs.md +++ /dev/null @@ -1,434 +0,0 @@ -## Inputs -These are the inputs that can be provided to the tasks. -Each task can only take a specific set, indicated under the `inputs` property of the YAML. - -### director config - -The config director will set the bosh tile (director) on Ops Manager. - -The `config` input for a director task expects to have a `director.yml` file. -The configuration of the `director.yml` is IAAS specific for some properties -- i.e. networking. - -There are two ways to build a director config. - -1. Using an already deployed Ops Manager, you can extract the config using [staged-director-config]. -2. Deploying a brand new Ops Manager requires more effort for a `director.yml`. - The configuration of director is variables based on the features enabled. - For brevity, this `director.yml` is a basic example for vsphere. - ----excerpt--- "examples/director-configuration" - -The IAAS specific configuration can be found in the Ops Manager API documentation. - -Included below is a list of properties that can be set in the `director.yml` -and a link to the API documentation explaining any IAAS specific properties. 
- -* `az-configuration` - a list of availability zones [Ops Manager API][opsman-api-azs] -* `network-assignment` - the network the bosh director is deployed to [Ops Manager API][opsman-api-network-az-assignment] -* `networks-configuration` - a list of named networks [Ops Manager API][opsman-api-networks] -* `properties-configuration` - * `iaas_configuration` - configuration for the bosh IAAS CPI [Ops Manager API][opsman-api-director-properties] - * `director_configuration` - properties for the bosh director [Ops Manager API][opsman-api-director-properties] - * `security_configuration` - security properties for the bosh director [Ops Manager API][opsman-api-director-properties] - * `syslog_configuration` - configure the syslog sinks for the bosh director [Ops Manager API][opsman-api-director-properties] -* `resource-configuration` - IAAS VM flavor for the bosh director [Ops Manager API][opsman-api-config-resources] -* `vmextensions-configuration` - create/update/delete VM extensions [Ops Manager API][opsman-api-vm-extension] - -#### GCP Shared VPC - -Support for Shared VPC is done via configuring the `iaas_identifier` path for the [infrastructure subnet][gcp-create-network], -which includes the host project id, region of the subnet, and the subnet name. - -For example: - -`[HOST_PROJECT_ID]/[NETWORK]/[SUBNET]/[REGION]` - -### download-product-config - -The `config` input for a download product task -can be used with a `download-config.yml` file to download a tile. -The configuration of the `download-config.yml` looks like this: - -=== "Tanzu Network" - ---excerpt--- "examples/download-product-config-pivnet" -=== "S3" - ---excerpt--- "examples/download-product-config-s3" -=== "GCS" - ---excerpt--- "examples/download-product-config-gcs" -=== "Azure" - ---excerpt--- "examples/download-product-config-azure" - -### download-stemcell-product-config - -The `config` input for a download product task -can be used with a `download-config.yml` file to download a stemcell. 
-The configuration of the `download-config.yml` looks like this: - ----excerpt--- "examples/download-stemcell-product-config" - -### env - -The `env` input for a task expects to have a `env.yml` file. -This file contains properties for targeting and logging into the Ops Manager API. - -=== "basic auth" - ---excerpt--- "examples/env" -=== "uaa auth" - ---excerpt--- "examples/env-uaa" - -#### Getting the `client-id` and `client-secret` - -Ops Manager will by preference use Client ID and Client Secret if provided. -To create a Client ID and Client Secret - -1. `uaac target https://YOUR_OPSMANAGER/uaa` -1. `uaac token sso get` if using SAML or `uaac token owner get` if using basic auth. Specify the Client ID as `opsman` and leave Client Secret blank. -1. Generate a client ID and secret - -```bash -uaac client add -i -Client ID: NEW_CLIENT_NAME -New client secret: DESIRED_PASSWORD -Verify new client secret: DESIRED_PASSWORD -scope (list): opsman.admin -authorized grant types (list): client_credentials -authorities (list): opsman.admin -access token validity (seconds): 43200 -refresh token validity (seconds): 43200 -redirect uri (list): -autoapprove (list): -signup redirect url (url): -``` - -### errand config - -The `ERRAND_CONFIG_FILE` input for the [`apply-changes`][apply-changes] task. -This file contains properties for enabling and disabling errands -for a particular run of `apply-changes`. - -To retrieve the default configuration of your product's errands, -[`staged-config`][staged-config] can be used. - -The expected format for this errand config is as follows: - - ```yaml - errands: - sample-product-1: - run_post_deploy: - smoke_tests: default - push-app: false - run_pre_delete: - smoke_tests: true - sample-product-2: - run_post_deploy: - smoke_tests: default - ``` - -### installation - -The file contains the information to restore an Ops Manager VM. -The `installation` input for a opsman VM task expects to have a `installation.zip` file. 
- -This file can be exported from an Ops Manager VM using the [export-installation][export-installation]. -This file can be imported to an Ops Manager VM using the [import-installation][import-installation]. - -!!! warning - This file cannot be manually created. It is a file that must be generated via the export function of Ops Manager. - -### Ops Manager config -The config for an Ops Manager described IAAS specific information for creating the VM -- i.e. VM flavor (size), IP addresses - -The `config` input for opsman task expects to have a `opsman.yml` file. -The configuration of the `opsman.yml` is IAAS specific. - -=== "AWS" - ---excerpt--- "examples/aws-configuration" -=== "Azure" - ---excerpt--- "examples/azure-configuration" -=== "GCP" - ---excerpt--- "examples/gcp-configuration" -=== "Openstack" - ---excerpt--- "examples/openstack-configuration" -=== "vSphere" - ---excerpt--- "examples/vsphere-configuration" -=== "Additional Settings" - ---excerpt--- "examples/opsman-settings" - -Specific advice and features for the different IaaSs are documented below -#### AWS -These required properties are adapted from the instructions outlined in -[Launching an Ops Manager Director Instance on AWS][manual-aws] - -{% include '.ip-addresses.md' %} - -!!! info "Using instance_profile to Avoid Secrets" - For authentication you must either set `use_instance_profile: true` - or provide a `secret_key_id` and `secret_access_key`. - You must remove key information if you're using an instance profile. - Using an instance profile allows you to avoid interpolation, - as this file then contains no secrets. - - -#### Azure -The required properties are adapted from the instructions outlined in -[Launching an Ops Manager Director Instance on Azure][manual-azure] - -{% include '.ip-addresses.md' %} - -#### GCP -The required properties are adapted from the instructions outlined in -[Launching an Ops Manager Director Instance on GCP][manual-gcp] - -{% include '.ip-addresses.md' %} - -!!! 
info "Using a Service Account Name to Avoid Secrets" - For authentication either `gcp_service_account` or `gcp_service_account_name` is required. - You must remove the one you are not using - note that using `gcp_service_account_name` allows you to avoid interpolation, - as this file then contains no secrets. - -Support for Shared VPC is done via -[configuring the `vpc_subnet` path][gcp-shared-vpc] -to include the host project id, region of the subnet, and the subnet name. - -For example: - -`projects/[HOST_PROJECT_ID]/regions/[REGION]/subnetworks/[SUBNET]` - -#### Openstack - -The required properties are adapted from the instructions outlined in -[Launching an Ops Manager Director Instance on Openstack][manual-openstack] - -{% include '.ip-addresses.md' %} - -#### vSphere - -The required properties are adapted from the instructions outlined in -[Deploying BOSH and Ops Manager to vSphere][manual-vsphere] - -### opsman image - -This file is an [artifact from Tanzu Network](https://network.pivotal.io/products/ops-manager), -which contains the VM image for a specific IaaS. -For vsphere and openstack, it's a full disk image. -For AWS, GCP, and Azure, it's a YAML file listing the location -of images that are already available on the IaaS. - -These are examples to download the image artifact for each IaaS -using the [download-product][download-product] task. - -#### opsman.yml - -{% include "how-to-guides/.opsman-config-tabs.md" %} - -The `p-automator` CLI includes the ability to extract the Ops Manager VM configuration (GCP, AWS, Azure, and VSphere). -This works for Ops Managers that are already running and useful when [migrating to automation][upgrade-how-to]. - -Usage: - -1. Get the Platform Automation Toolkit image from Tanzu Network. -1. Import that image into `docker` to run the [`p-automation` locally][running-commands-locally]. -1. Create a [state file][state] that represents your current VM and IAAS. -1. Invoke the `p-automator` CLI to get the configuration. 
- -For example, on AWS with an access key and secret key: - -```bash -docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ -p-automator export-opsman-config \ ---state-file=state.yml \ ---aws-region=us-west-1 \ ---aws-secret-access-key some-secret-key \ ---aws-access-key-id some-access-key -``` - -The outputted `opsman.yml` contains the information needed for Platform Automation Toolkit to manage the Ops Manager VM. - -#### download-product task - -```yaml -- task: download-opsman-image - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: opsman.yml -``` - -### product - -The `product` input requires a single tile file (`.pivotal`) as downloaded from Tanzu Network. - -Here's an example of how to pull the Tanzu Application Service tile -using the [download-product][download-product] task. - -#### product.yml - -```yaml ---- -pivnet-api-token: token -pivnet-file-glob: "cf-*.pivotal" -pivnet-product-slug: elastic-runtime -product-version-regex: ^2\.6\..*$ -``` - -#### download-product task - -```yaml -- task: download-stemcell - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: product.yml -``` - -!!! warning - This file cannot be manually created. It is a file that must retrieved from Tanzu Network. - -### product config - -There are two ways to build a product config. - -1. Using an already deployed product (tile), you can extract the config using [staged-config]. -1. Use an example and fill in the values based on the meta information from the tile. -For brevity, this `product.yml` is a basic example for `healthwatch`. - ----excerpt--- "examples/product-configuration" - -Included below is a list of properties that can be set in the `product.yml` -and a link to the API documentation explaining the properties. 
- -* `product-properties` - properties for the tile [Ops Manager API][opsman-api-config-products] -* `network-properties` - a list of named networks to deploy the VMs to [Ops Manager API][opsman-api-config-networks] -* `resource-config` - for the jobs of the tile [Ops Manager API][opsman-api-config-resources] - -### state - -This file contains that meta-information needed to manage the Ops Manager VM. -The `state` input for a opsman VM task expects to have a `state.yml` file. - -The `state.yml` file contains two properties: - -1. `iaas` is the IAAS the ops manager VM is hosted on. (`gcp`, `vsphere`, `aws`, `azure`, `openstack`) -2. `vm_id` is the VM unique identifier for the VM. For some IAAS, the VM ID is the VM name. - -Different IaaS uniquely identify VMs differently; -here are examples for what this file should look like, -depending on your IAAS: - -=== "AWS" - ``` yaml - --8<-- 'docs/examples/state/aws.yml' - ``` - -=== "Azure" - ``` yaml - --8<-- 'docs/examples/state/azure.yml' - ``` - -=== "GCP" - ``` yaml - --8<-- 'docs/examples/state/gcp.yml' - ``` - -=== "OpenStack" - ``` yaml - --8<-- 'docs/examples/state/openstack.yml' - ``` - -=== "vSphere" - ``` yaml - --8<-- 'docs/examples/state/vsphere.yml' - ``` - -### stemcell -This `stemcell` input requires the stemcell tarball (`.tgz`) as downloaded from Tanzu Network. -It must be in the original filename as that is used by Ops Manager to parse metadata. -The filename could look like `bosh-stemcell-621.76-vsphere-esxi-ubuntu-xenial-go_agent.tgz`. - -!!! warning - This file cannot be manually created. It is a file that must retrieved from Tanzu Network. - -Here's an example of how to pull the vSphere stemcell -using the [download-product][download-product] task. 
- -#### stemcell.yml - -=== "AWS" - ```yaml - --- - pivnet-api-token: token - pivnet-file-glob: "bosh-stemcell-*-aws*.tgz" - pivnet-product-slug: stemcells-ubuntu-xenial - product-version-regex: ^170\..*$ - ``` - -=== "Azure" - ```yaml - --- - pivnet-api-token: token - pivnet-file-glob: "bosh-stemcell-*-azure*.tgz" - pivnet-product-slug: stemcells-ubuntu-xenial - product-version-regex: ^170\..*$ - ``` - -=== "GCP" - ```yaml - --- - pivnet-api-token: token - pivnet-file-glob: "bosh-stemcell-*-google*.tgz" - pivnet-product-slug: stemcells-ubuntu-xenial - product-version-regex: ^170\..*$ - ``` - -=== "OpenStack" - ```yaml - --- - pivnet-api-token: token - pivnet-file-glob: "bosh-stemcell-*-openstack*.tgz" - pivnet-product-slug: stemcells-ubuntu-xenial - product-version-regex: ^170\..*$ - ``` - -=== "vSphere" - ```yaml - --- - pivnet-api-token: token - pivnet-file-glob: "bosh-stemcell-*-vsphere*.tgz" - pivnet-product-slug: stemcells-ubuntu-xenial - product-version-regex: ^170\..*$ - ``` - - -#### download-product task - -```yaml -- task: download-stemcell - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: stemcell.yml -``` - -#### assign-stemcell-task -This artifact is an output of [`download-product`][download-product] -located in the `assign-stemcell-config` output directory. - -This file should resemble the following: -```yaml -product: cf -stemcell: "97.190" -``` - -### telemetry - -The `config` input for the [collect-telemetry][collect-telemetry] task -can be used with a `telemetry.yml` file to collect data for VMware -so they can learn and measure results -in order to put customer experience at the forefront of their product decisions. 
-The configuration of the `telemetry.yml` looks like this: - ----excerpt--- "examples/telemetry" - -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} diff --git a/docs/pipeline-design/configuration-management-strategies.html.md.erb b/docs/pipeline-design/configuration-management-strategies.html.md.erb new file mode 100644 index 00000000..d8eda4ff --- /dev/null +++ b/docs/pipeline-design/configuration-management-strategies.html.md.erb @@ -0,0 +1,390 @@ +# Configuration management strategies + +When building pipelines, there are many strategies +for structuring your configuration in source control +and in pipeline design. +No single method can cover all situations. +This topic presents some of the possibilities and their uses +so that you can choose the best approach +for your situation. + +## Single repository for each foundation + +This is the simplest approach, +and it's the default assumed in all of the examples in these topics, +unless there is a clear reason to use a different approach. +It entails using a single Git repository for each foundation. + +Tracking foundation changes are simple, +getting started is easy, +duplicating foundations involves only cloning a repository, +and configuration files are not difficult to understand. + +This is the strategy used in +[Install Tanzu Operations Manager](../how-to-guides/installing-opsman.html) and +[Upgrading an existing Tanzu Operations Manager](../how-to-guides/upgrade-existing-opsman.html). + +This example configuration repository +uses the "Single Repository for each Foundation" pattern: + +``` +├── auth.yml +├── pas.yml +├── director.yml +├── download-opsman.yml +├── download-product-configs +│   ├── healthwatch.yml +│   ├── opsman.yml +│   ├── pas-windows.yml +│   ├── pas.yml +│   └── telemetry.yml +├── env.yml +├── healthwatch.yml +├── opsman.yml +└── pas-windows.yml +``` + +Notice that there is only one subdirectory +and that all other files are in the base directory. 
+This minimizes parameter mapping in the platform-automation tasks. +For example, in the [`configure-director`](../tasks.html#configure-director) +step: + +```yaml +- task: configure-director + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-director.yml + input_mapping: + config: configuration + env: configuration +``` + +You map the config files +to the expected input named `env` of the `configure-director` task. +Because the `configure-director` task's default `ENV` parameter is `env.yml`, +it automatically uses the `env.yml` file in the configuration repo. +You do not need to explicitly name the `ENV` parameter for the task. +This also works for `director.yml`. + +Another option for mapping resources to inputs is discussed in +[Matching resource names and input names](#matching-resource-names-and-input-names). + +For reference, here is the `configure-director` task: + +

+The inputs, outputs, params, filename, and filepath
+of this task file are part of its semantically versioned API.
+See https://semver.org for information about semantic versioning.

+ +<%= partial "../tasks/configure-director" %> + +## Multiple foundations with one repository + +Multiple foundations can use a single Git configuration source, +but have different variables loaded +from a foundation-specific vars file, CredHub, and so on. + +This strategy can reduce foundation drift +and streamline the configuration promotion process between foundations. + +This is the strategy used in the [reference pipeline](../pipelines/multiple-products.html). + +### Overview + +The [reference pipeline](../pipelines/multiple-products.html) uses a public [config repo](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config) +with all secrets stored in the CredHub belonging to Concourse. + +The design considerations for this strategy, as implemented, are: + +- Prioritization of ease of configuration promotion is prioritized + over minimization of configuration + file duplication between foundations. +- Global, non-public variables can be overwritten by + foundation-specific variables based on `VARS_FILES` ordering. +- Product configuration can differ between product versions, + so the entire configuration file is promoted between foundations. +- No outside tooling or additional preparation tasks + are required to use this strategy. + It makes use of only concepts and workflows + built in to Platform Automation and Concourse. +- There are no significant differences between the required setup of the foundations. + + This doesn't mean that this strategy cannot be used + with more complicated differences. + If the pipelines need to be different for one reason or another, + you might want the `pipelines` directory to be at the foundation level + and for the `pipeline.yml` to be foundation-specific. + + The [reference pipeline](../pipelines/multiple-products.html) handles the different environments via a `fly` variable. 
+ The pipeline set script is found in the [`scripts`](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config/blob/develop/scripts/update-reference-pipeline.sh) directory.
+
+### Structure
+
+A simplified view of the [config-repo](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config) is represented below:
+
+```
+├── download-product-pivnet
+│   ├── download-opsman.yml
+│   └── download-pks.yml
+├── foundations
+│   ├── config
+│   │   ├── auth.yml
+│   │   └── env.yml
+│   ├── development
+│   │   ├── config
+│   │   │   ├── director.yml
+│   │   │   ├── download-opsman.yml
+│   │   │   ├── download-pks.yml
+│   │   │   ├── opsman.yml
+│   │   │   └── pks.yml
+│   │   └── vars
+│   │   ├── director.yml
+│   │   ├── pks.yml
+│   │   └── versions.yml
+│   ├── sandbox
+│   │   ├── config
+│   │   │   ├── director.yml
+│   │   │   ├── download-opsman.yml
+│   │   │   ├── download-pks.yml
+│   │   │   ├── opsman.yml
+│   │   │   └── pks.yml
+│   │   └── vars
+│   │   ├── director.yml
+│   │   ├── pks.yml
+│   │   └── versions.yml
+│   └── vars
+│   └── director.yml
+├── pipelines
+│   ├── download-products.yml
+│   └── pipeline.yml
+└── scripts
+    └── update-reference-pipeline.sh
+```
+
+Starting with the top-level folders:
+
+- `download-product-pivnet` contains config files
+  for downloading products from the Broadcom Support portal
+  and uploading these products to a blobstore.
+- `foundations` contains all of the configuration files
+  and variable files for all foundations.
+- `pipelines` contains the pipeline files
+  for the resources pipeline and the foundation pipelines.
+- `scripts` contains the BASH script for setting all of the pipelines.
+
+#### foundations
+
+The `foundations` folder contains all of the foundations plus two additional folders:
+
+- `config` contains any global config files, in this case, `env.yml` and `auth.yml`.
+ These files are used by `om` and their structure is not foundation-dependent, + so each foundation pipeline fills out the parameterized variables + from the Concourse credential manager. +- `vars` contains foundation-independent variables for any of the configuration files. + In this example, all of the foundations are on a single IAAS, + so the common vars tend to be IAAS-specific. + These files can also include any other variables determined + to be consistently the same across foundations. + +#### foundations/ + +For each foundation, there are two folders: + +- `config` contains the configuration files that `om` uses for: + - Downloading products from a blobstore; specified with the prefix `download-` + - Configuring a product; specified by `.yml` + - Configuring the BOSH director; specified with `director.yml` + - Configuring the Tanzu Operations Manager VM; specified with `opsman.yml` +- `vars` contains any foundation-specific variables used by Platform Automation tasks. + These variables fill in any variables `((parameterized))` in config files + that are not stored in Concourse's credential manager. + +### Config promotion example + +This example, shows how to update PKS from 1.3.8 to 1.4.3. +We will start with updating this tile in our `sandbox` foundation +and then promote the configuration to the `development` foundation. + +This procedure assumes that you are viewing this example +from the root of the [Platform Automation Reference Pipeline Configs repo](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config). + +1. Update `download-product-pivnet/download-pks.yml`: + + ```diff + - product-version-regex: ^1\.3\..*$ + + product-version-regex: ^1\.4\..*$ + ``` + +2. Commit this change and run the [resource pipeline](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config/blob/develop/pipelines/download-products.yml). +This downloads the 1.4.3 PKS tile +and makes it available on S3. + +1. 
Update the versions file for sandbox:
+
+    ```diff
+    - pks-version: 1.3.8
+    + pks-version: 1.4.3
+    ```
+
+2. Run the `upload-and-stage-pks` job, but do not run the `configure-pks` or `apply-product-changes` jobs.
+
+    This ensures that the `apply-changes` step doesn't automatically fail
+    if there are configuration changes
+    between what is currently deployed
+    and the new tile.
+
+3. Log in to the Tanzu Operations Manager UI. If the tile has unconfigured properties:
+
+    1. Manually configure the tile and deploy it.
+
+    2. Re-export the staged-config:
+
+        ```
+        om -e env.yml staged-config --include-credentials -p pivotal-container-service
+        ```
+
+    3. Merge the resulting config with the existing `foundations/sandbox/config/pks.yml`.
+
+        Doing a diff of the previous `pks.yml`
+        against the new one makes this process much easier.
+
+    4. Pull out the new parameterizable variables
+       and store them in `foundations/vars/pks.yml` or `foundations/sandbox/vars/pks.yml`,
+       or directly into CredHub.
+       Note that there may be nothing new to parameterize.
+       This is okay, and makes the process go faster.
+
+    5. Commit any changes.
+
+4. Run the `configure-pks` and `apply-product-changes` jobs on the `sandbox` pipeline.
+
+5. Assuming the `sandbox` pipeline is all green,
+   copy the `foundations/sandbox/config` folder into `foundations/development/config`.
+
+6. Modify the `foundations/development/vars/versions.yml` and `foundations/development/vars/pks.yml` files
+   to have all of the property references that exist in their sandbox counterparts
+   and the foundation-specific values.
+
+7. Commit these changes and run the `development` pipeline all the way through.
+

+A quicker development deploy process: +Since all of the legwork is done manually in the sandbox environment, +there is no need to log in to the development Tanzu Operations Manager environment. +
+If there are no configuration changes, the only file that needs to be promoted is versions.yml.

+ + +## Advanced pipeline design + +### Matching resource names and input names + +As an alternative to `input_mapping`, +you can create resources that match the input names on the tasks. +Even if these resources map to the same git repository and branch, +they can be declared as separate inputs. + +```yaml +- name: config + type: git + source: + private_key: ((repo-key.private_key)) + uri: ((repo-uri)) + branch: develop + +- name: env + type: git + source: + private_key: ((repo-key.private_key)) + uri: ((repo-uri)) + branch: develop +``` + +As long as each of these resources have an associated `get: ` +in the job, they will automatically be mapped to the inputs of the tasks in that job: + +``` +- name: configure-director + serial: true + plan: + - aggregate: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: config + passed: [previous-job] + - get: env + passed: [previous-job] + - task: configure-director + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-director.yml +``` + +
+If you have two resources defined with the same git repository, such as env and config, +and have a passed constraint on only one of them, +there is a possibility that they will not be at the same SHA for any given job in your pipeline. + +Example: +
+  
+   - get: config
+   - get: env
+     passed: [previous-job]
+  
+
+
+ +### Modifying resources in-place + +This section uses a Concourse feature that allows inputs and outputs to have the same name. +This feature is only available in Concourse 5+. The example that follows does not work with Concourse v4. + +In certain circumstances, resources can be modified by one task in a job +for use later in that same job. A few tasks that offer this ability include: + +- [credhub-interpolate](../tasks.html#credhub-interpolate) +- [prepare-tasks-with-secrets](../tasks.html#prepare-tasks-with-secrets) +- [prepare-image](../tasks.html#prepare-image) + +For each of these tasks, `output_mapping` can be used to overwrite +an input with a modified input for use with tasks later in that job. + +In the following example, `prepare-tasks-with-secrets` takes in the +`platform-automation-tasks` input and modifies it for the `download-product` +task. For a more detailed explanation see [Secrets Handling](../concepts/secrets-handling.html). + +```yaml +- name: configure-director + serial: true + plan: + - aggregate: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: config + - get: env + - task: prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + output_mapping: + tasks: platform-automation-tasks + params: + CONFIG_PATHS: config + - task: download-product + image: platform-automation-image + # The following platform-automation-tasks have been modified + # by the prepare-tasks-with-secrets task + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-ops-manager.yml +``` diff --git a/docs/pipeline-design/configuration-management-strategies.md b/docs/pipeline-design/configuration-management-strategies.md deleted file mode 100644 index 2fd0d711..00000000 --- 
a/docs/pipeline-design/configuration-management-strategies.md +++ /dev/null @@ -1,346 +0,0 @@ -# Configuration Management Strategies - -When building pipelines, -there are many possible strategies -for structuring your configuration in source control -as well as in pipeline design. -No single method can cover all situations. -After reading this document, -we hope you feel equipped to select an approach. - -## Single Repository for Each Foundation - -This is the simplest thing that could possibly work. -It's the default assumed in all our examples, -unless we've articulated a specific reason to choose a different approach. -It entails using a single Git repository for each foundation. - -Tracking foundation changes are simple, -getting started is easy, -duplicating foundations is simply a matter of cloning a repository, -and configuration files are not difficult to understand. - -This is the strategy used throughout the -[Install Ops Man How to Guide][install-how-to] and the -[Upgrading an Existing Ops Manager How to Guide][upgrade-how-to]. - -Let's examine an example configuration repository -that uses the "Single Repository for each Foundation" pattern: - -``` -├── auth.yml -├── pas.yml -├── director.yml -├── download-opsman.yml -├── download-product-configs -│   ├── healthwatch.yml -│   ├── opsman.yml -│   ├── pas-windows.yml -│   ├── pas.yml -│   └── telemetry.yml -├── env.yml -├── healthwatch.yml -├── opsman.yml -└── pas-windows.yml -``` - -Notice that there is only one subdirectory -and that all other files are at the repositories base directory. -_This minimizes parameter mapping in the platform-automation tasks_. -For example, in the [`configure-director`][configure-director] -step: - ----excerpt--- "examples/configure-director-usage" - -We map the config files -to the expected input named `env` of the `configure-director` task. 
-Because the `configure-director` task's default `ENV` parameter is `env.yml`, -it automatically uses the `env.yml` file in our configuration repo. -We do not need to explicitly name the `ENV` parameter for the task. -This also works for `director.yml`. - -Another option for mapping resources to inputs -is discussed in the [Matching Resource Names and Input Names][matching-resource-names-and-input-names] section. - -For reference, here is the `configure-director` task: - ----excerpt--- "tasks/configure-director" - -## Multiple Foundations with one Repository - -Multiple foundations may use a single Git configuration source -but have different variables loaded -from a foundation specific vars file, Credhub, etc. - -This strategy can reduce foundation drift -and streamline the configuration promotion process between foundations. - -**This is the strategy used in our [Reference Pipeline][reference-pipeline]** - -### Overview - -The [Reference Pipeline][reference-pipeline] uses a public [config repo][ref-config-repo] -with all secrets stored in our Concourse's Credhub. - -The design considerations for this strategy as implemented are as follows: - -- Prioritization of ease of configuration promotion - over minimization of configuration - file duplication between foundations. -- Global, non-public variables can be overwritten by - foundation-specific variables based on `VARS_FILES` ordering. -- Product configuration can differ between product versions, - so the entire configuration file is promoted between foundations. -- No outside tooling or additional preparation tasks - are required to use this strategy. - It makes use of only concepts and workflows - built-in to Platform Automation and Concourse. -- No significant differences between the required setup of foundations. - - This doesn't mean that this strategy cannot be used - with more complicated differences. 
- If the pipelines need to be different for one reason or another, - you might want the `pipelines` directory to be at the foundation level - and for the `pipeline.yml` to be foundation-specific. - - The Reference Pipeline handles the different environments via a `fly` variable. - The pipeline set script is found in the [`scripts`][ref-config-update-script] directory. - -### Structure - -A simplified view of the [config-repo][ref-config-repo] is represented below: - -``` -├── download-product-pivnet -│   ├── download-opsman.yml -│   └── download-pks.yml -├── foundations -│   ├── config -│   │   ├── auth.yml -│   │   └── env.yml -│   ├── development -│   │   ├── config -│   │   │   ├── director.yml -│   │   │   ├── download-opsman.yml -│   │   │   ├── download-pks.yml -│   │   │   ├── opsman.yml -│   │   │   └── pks.yml -│   │   └── vars -│   │   ├── director.yml -│   │   ├── pks.yml -│   │   └── versions.yml -│   ├── sandbox -│   │   ├── config -│   │   │   ├── director.yml -│   │   │   ├── download-opsman.yml -│   │   │   ├── download-pks.yml -│   │   │   ├── opsman.yml -│   │   │   └── pks.yml -│   │   └── vars -│   │   ├── director.yml -│   │   ├── pks.yml -│   │   └── versions.yml -│   └── vars -│   └── director.yml -├── pipelines -│   ├── download-products.yml -│   └── pipeline.yml -└── scripts - └── update-reference-pipeline.sh -``` - -Let's start with the top-level folders: - -- `download-product-pivnet` contains config files - for downloading products from pivnet - and uploading those products to a blobstore. -- `foundations` contains all of the configuration files - and variable files for all foundations. -- `pipelines` contains the pipeline files - for the resources pipeline and the foundation pipelines. -- `scripts` contains the BASH script for setting all of the pipelines. 
- -#### `foundations` - -Within the `foundations` folder, we have all of our foundations as well as two additional folders: - -- `config` contains any global config files -- in our case, `env.yml` and `auth.yml`. - These files are used by `om` and their structure is not foundation-dependent. - As a result, each foundation pipeline fills out the parameterized variables - from Concourse's credential manager. -- `vars` contains foundation-independent variables for any of the configuration files. - In this example, all of the foundations are on a single IAAS, - so the common vars tend to be IAAS-specific. - These files can also include any other variables determined - to be consistently the same across foundations. - -#### `foundations/` - -For each foundation, we have two folders: - -- `config` contains the configuration files that `om` uses for: - - Downloading products from a blobstore; specified with the prefix `download-` - - Configuring a product; specified by `.yml` - - Configuring the BOSH director; specified with `director.yml` - - Configuring the Ops Manager VM; specified with `opsman.yml` -- `vars` contains any foundation-specific variables used by Platform Automation tasks. - These variables will fill in any variables `((parameterized))` in config files - that are not stored in Concourse's credential manager. - -### Config Promotion Example - -In this example, we will be updating PKS from 1.3.8 to 1.4.3. -We will start with updating this tile in our `sandbox` foundation -and then promote the configuration to the `development` foundation. -We assume that you are viewing this example -from the root of the [Reference Pipeline Config Repo][ref-config-repo]. - -1. Update `download-product-pivnet/download-pks.yml`: - - ```diff - - product-version-regex: ^1\.3\..*$ - + product-version-regex: ^1\.4\..*$ - ``` - -1. Commit this change and run the [resource pipeline][ref-config-resource-pipeline] -which will download the 1.4.3 PKS tile -and make it available on S3. 
- -1. Update the versions file for sandbox: - - ```diff - - pks-version: 1.3.8 - + pks-version: 1.4.3 - ``` - -1. Run the `upload-and-stage-pks` job, but do not run the `configure-pks` or `apply-product-changes` jobs. - - This makes it so that the `apply-changes` step won't automatically fail - if there are configuration changes - between what we currently have deployed - and the new tile. - -1. Login to the Ops Manager UI. If the tile has unconfigured properties: - - 1. Manually configure the tile and deploy - - 1. Re-export the staged-config: - - ``` - om -e env.yml staged-config --include-credentials -p pivotal-container-service - ``` - - 1. Merge the resulting config with the existing `foundations/sandbox/config/pks.yml`. - - Diffing the previous `pks.yml` - and the new one makes this process much easier. - - 1. Pull out new parameterizable variables - and store them in `foundations/vars/pks.yml` or `foundations/sandbox/vars/pks.yml`, - or directly into Credhub. - Note, there may be nothing new to parameterize. - This is okay, and makes the process go faster. - - 1. Commit any changes. - -1. Run the `configure-pks` and `apply-product-changes` jobs on the `sandbox` pipeline. - -1. Assuming the `sandbox` pipeline is all green, - copy the `foundations/sandbox/config` folder into `foundations/development/config`. - -1. Modify the `foundations/development/vars/versions.yml` and `foundations/development/vars/pks.yml` files - to have all of the property references that exist in their sandbox counterparts - as well as the foundation-specific values. - -1. Commit these changes and run the `development` pipeline all the way through. - -!!! info "A Quicker `development` Deploy Process" - Since all of the legwork was done manually in the `sandbox` environment - there is no need to login to the `development` Ops Manager environment. 
- - If there are no configuration changes, the only file that needs to be promoted is `versions.yml` - - -## Advanced Pipeline Design - -### Matching Resource Names and Input Names - -As an alternative to `input_mapping`, -we can create resources that match the input names on our tasks. -Even if these resources map to the same git repository and branch, -they can be declared as separate inputs. - ----excerpt--- "examples/input-matched-resources-usage" - -As long as those resources have an associated `get: ` -in the job, they will automatically be mapped to the inputs of the tasks in that job: - ----excerpt--- "examples/configure-director-matched-resources-usage" - -!!! warning "Passed Constraints" - If you have two resources defined with the same git repository, such as env and config, - and have a passed constraint on only one of them, - there is a possibility that they will not be at the same SHA for any given job in your pipeline. - - Example: - ```yaml - - get: config - - get: env - passed: [previous-job] - ``` - -### Modifying Resources in-place - -!!! info "Concourse 5+ Only" - This section uses a Concourse feature that allows inputs and outputs to have the same name. - This feature is only available in Concourse 5+. The following does not work with Concourse 4. - -In certain circumstances, resources can be modified by one task in a job -for use later in that same job. A few tasks that offer this ability include: - -- [credhub-interpolate] -- [prepare-tasks-with-secrets] -- [prepare-image] - -For each of these tasks, `output_mapping` can be used to "overwrite" -an input with a modified input for use with tasks later in that job. - -In the following example, `prepare-tasks-with-secrets` takes in the -`platform-automation-tasks` input and modifies it for the `download-product` -task. A more in-depth explanation of this can be found on the [secrets-handling][secrets-handling] page. 
- -```yaml -- name: configure-director - serial: true - plan: - - aggregate: - - get: platform-automation-image - params: - unpack: true - - get: platform-automation-tasks - params: - unpack: true - - get: config - - get: env - - task: prepare-tasks-with-secrets - image: platform-automation-image - file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml - input_mapping: - tasks: platform-automation-tasks - output_mapping: - tasks: platform-automation-tasks - params: - CONFIG_PATHS: config - - task: download-product - image: platform-automation-image - # The following platform-automation-tasks have been modified - # by the prepare-tasks-with-secrets task - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-ops-manager.yml -``` - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/pipelines/multiple-products.html.md.erb b/docs/pipelines/multiple-products.html.md.erb new file mode 100644 index 00000000..7087537b --- /dev/null +++ b/docs/pipelines/multiple-products.html.md.erb @@ -0,0 +1,1212 @@ +# VMware Tanzu Operations Manager and multiple products + +In this topic you will find a reference pipeline that illustrates the tasks and provides an example of a basic pipeline design. You know your environment and constraints. VMware recommends that you look at the tasks that make up the pipeline, and see how they can be arranged for your specific automation needs. For a deeper dive into each task, see the [Task Reference](../tasks.html). + +These Concourse pipelines are examples of how to use the tasks. If you use a different CI/CD platform, you can use these Concourse files as examples of the inputs, outputs, and arguments used in each step in the workflow. + +## Prerequisites + +* Deployed Concourse + +

+ Platform Automation Toolkit is based on Concourse CI.
+ VMware recommends that you have some familiarity with Concourse before getting started.
+ If you are new to Concourse, Concourse CI Tutorials is a good place to start.

+
+
+
+* Persisted datastore that can be accessed by a Concourse resource (for example, s3, gcs, minio)
+* A valid [Env file](../how-to-guides/configuring-env.html#generating-env-file): this file will contain credentials necessary to log in to Tanzu Operations Manager using the `om` CLI.
+It is used by every task within Platform Automation Toolkit.
+* A valid [Auth file](../how-to-guides/configuring-auth.html#auth-file): this file will contain the credentials necessary to create the Tanzu Operations Manager login the first time
+the VM is created. The choices for this file are:
+    * simple authentication
+    * saml authentication
+

+ There will be some crossover between the auth file and the env file due to how om is set up and how the system works. It is highly recommended to parameterize these values, and let a credential management system (such as CredHub) fill in these values for you to maintain consistency across files.

+ +* An [opsman-configuration](../inputs-outputs.html) file: This file is required to connect to an IAAS, and to control +the lifecycle management of the Tanzu Operations Manager VM. +* A [director-configuration](../how-to-guides/creating-a-director-config-file.html) file: Each Tanzu Operations Manager needs its own configuration, but it is retrieved differently than +a product configuration. This config is used to deploy a new Tanzu Operations Manager director, or to update an existing one. +* A set of valid [product-configuration](../how-to-guides/adding-a-product.html) files: Each product configuration is a YAML file that contains the properties +necessary to configure a Tanzu Operations Manager product using the `om` tool. This can be used during install or update. +* (Optional) A working [CredHub](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-platform-for-cloud-foundry/10-0/tpcf/credhub-index.html) setup with its own UAA client and secret. + +

+ Ensure that products have been procured from the Broadcom Support portal using the information in + Retrieving external dependencies.

+ +## Installing VMware Tanzu Operations Manager and multiple products + +The pipeline shows how to compose the tasks +to install Tanzu Operations Manager and the Tanzu Platform for Cloud Foundry and Healthwatch products. +Its dependencies are coming from a trusted git repository, +which can be retrieved as shown in [Retrieving external dependencies](../pipelines/resources.html). + +### Full pipeline and reference configurations + +The [docs-platform-automation-reference-pipeline-config](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config) git repository +contains the [full pipeline file](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config/blob/develop/pipelines/pipeline.yml), +along with other pipeline and configuration examples. + +This can be useful when you want to take +a fully assembled pipeline as a starting point. +The rest of this document covers the sections of the full pipeline in more detail. + +## Pipeline components + +### S3 resources + +These can be uploaded manually or from the [reference resources pipeline](../pipelines/resources.html). + +```yaml +resources: +- name: platform-automation-tasks + type: s3 + source: + access_key_id: ((s3_access_key_id)) + secret_access_key: ((s3_secret_access_key)) + region_name: ((s3_region_name)) + bucket: ((s3_pivnet_products_bucket)) + regexp: .*tasks-(.*).zip + +- name: platform-automation-image + type: s3 + source: + access_key_id: ((s3_access_key_id)) + secret_access_key: ((s3_secret_access_key)) + region_name: ((s3_region_name)) + bucket: ((s3_pivnet_products_bucket)) + regexp: .*image-(.*).tgz + +- name: telemetry-collector-binary + type: s3 + source: + access_key_id: ((s3_access_key_id)) + secret_access_key: ((s3_secret_access_key)) + region_name: ((s3_region_name)) + bucket: ((s3_pivnet_products_bucket)) + regexp: .*telemetry-(.*).tgz +``` + +

+If you are retrieving pas-windows and pas-windows-stemcell from an S3 bucket, +you must use the built-in S3 Concourse resource. +This is shown in the previous example. +The download-product task with SOURCE: s3 does not persist meta information +about necessary stemcell for pas-windows +because VMware does not distribute the Windows file system.

+ +

Alternatively, products may be downloaded using the download-product task with +the param SOURCE set to s3|azure|gcs. +In a job, specify the following task:

+ +```yaml +... +- task: download-pas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-configs/pas.yml + SOURCE: s3 + input_mapping: + config: interpolated-creds + output_mapping: + downloaded-product: pas-product + downloaded-stemcell: pas-stemcell +... +``` + +### Exported installation resource + +<%= partial "./export_installation_note" %> + +```yaml +- name: installation + type: s3 + source: + access_key_id: ((s3_access_key_id)) + secret_access_key: ((s3_secret_access_key)) + region_name: ((s3_region_name)) + bucket: ((s3_installation_bucket)) + regexp: ((foundation))-installation-(.*).zip +``` + +### Configured resources + +These configured resources contain values for +Tanzu Operations Manager VM creation, director, product, foundation-specific vars, auth, and env files. +For more details, see the [Inputs and outputs](../inputs-outputs.html) section. +Platform Automation Toolkit will not create these resources for you. + +```yaml + +# VM state and foundation configuration +- name: state + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_foundation_state_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + versioned_file: state-((foundation)).yml + initial_content_text: '{}' + initial_version: 'empty-start' + +# configurations +- name: configuration + type: git + source: + private_key: ((docs-ref-pipeline-repo-key.private_key)) + uri: ((docs-ref-pipeline-repo-uri)) + branch: develop +``` + +### Trigger resources + +```yaml +# triggers used to have jobs do something in a timely manner +- name: one-time-trigger + type: time + source: + interval: 999999h + +- name: daily-trigger + type: time + source: + interval: 24h +``` + +### Secrets handling + +This secrets handling example helps load secrets stored in an external credential manager such as CredHub. 
+Concourse support several [credential managers](https://concourse-ci.org/creds.html) natively. + +The configuration below uses the [`prepare-tasks-with-secrets`](../tasks.html#prepare-tasks-with-secrets) task +to load secrets from your external configuration files. + +```yaml + +# This task is used in multiple jobs +# The YAML anchor "*prepare-tasks-with-secrets" is used in its place +prepare-tasks-with-secrets: &prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + config: configuration + vars: configuration + params: + CONFIG_PATHS: config/foundations/config config/foundations/((foundation))/config + VARS_PATHS: vars/foundations/((foundation))/vars + output_mapping: + tasks: platform-automation-tasks +``` + +### Jobs + +Each job corresponds to a box +on the visual representation of your Concourse pipeline. +These jobs consume resources defined above. + +```yaml +jobs: +- name: test-platform-automation + serial: true + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: test-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/test-interpolate.yml + params: + CONFIG_FILE: foundations/((foundation))/config/download-tas.yml + SKIP_MISSING: true + input_mapping: + config: configuration + - task: test + file: platform-automation-tasks/tasks/test.yml + image: platform-automation-image + +- name: install-opsman + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + - get: one-time-trigger + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - get: state + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: 
*prepare-image + - task: download-opsman-image + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-opsman.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: opsman-image + - task: create-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/create-vm.yml + input_mapping: + image: opsman-image + config: configuration + vars: configuration + params: + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + STATE_FILE: state-((foundation)).yml + VARS_FILES: vars/foundations/((foundation))/vars/director.yml + ensure: &put-state + do: + - put: state + params: + file: generated-state/state-((foundation)).yml + - task: configure-authentication + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-authentication.yml + attempts: 10 + input_mapping: + env: configuration + config: configuration + params: + ENV_FILE: foundations/config/env.yml + AUTH_CONFIG_FILE: foundations/config/auth.yml + - task: configure-opsman + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-opsman.yml + input_mapping: + env: configuration + config: configuration + vars: configuration + params: + ENV_FILE: foundations/config/env.yml + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + VARS_FILES: vars/foundations/((foundation))/vars/director.yml + - task: configure-director + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-director.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + ENV_FILE: foundations/config/env.yml + DIRECTOR_CONFIG_FILE: foundations/((foundation))/config/director.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + 
vars/foundations/((foundation))/vars/tas.yml + vars/foundations/((foundation))/vars/pks.yml + - task: apply-director-changes + image: platform-automation-image + attempts: 3 + file: platform-automation-tasks/tasks/apply-director-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: export-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/export-installation.yml + input_mapping: + env: configuration + params: + INSTALLATION_FILE: ((foundation))-installation-$timestamp.zip + ENV_FILE: foundations/config/env.yml + - put: installation + params: + file: installation/((foundation))-installation*.zip + +- name: export-installation + serial_groups: [ install ] + serial: true + plan: + - in_parallel: + - get: state + passed: [ install-opsman ] + - get: daily-trigger + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: revert-staged-changes + image: platform-automation-image + file: platform-automation-tasks/tasks/revert-staged-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: export-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/export-installation.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + INSTALLATION_FILE: ((foundation))-installation-$timestamp.zip + - put: installation + params: + file: installation/((foundation))-installation*.zip + +- name: upgrade-opsman + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: installation + passed: [ 
export-installation ] + - get: configuration + - get: state + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: download-opsman-image + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-opsman.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: opsman-image + - task: upgrade-opsman + image: platform-automation-image + file: platform-automation-tasks/tasks/upgrade-opsman.yml + input_mapping: + image: opsman-image + config: configuration + env: configuration + vars: configuration + params: + ENV_FILE: foundations/config/env.yml + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + STATE_FILE: state-((foundation)).yml + INSTALLATION_FILE: ((foundation))-installation*.zip + VARS_FILES: vars/foundations/((foundation))/vars/director.yml + ensure: *put-state + - task: configure-director + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-director.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + ENV_FILE: foundations/config/env.yml + DIRECTOR_CONFIG_FILE: foundations/((foundation))/config/director.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + vars/foundations/((foundation))/vars/tas.yml + vars/foundations/((foundation))/vars/pks.yml + - task: apply-director-changes + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-director-changes.yml + attempts: 3 + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: export-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/export-installation.yml + input_mapping: + env: configuration + 
params: + ENV_FILE: foundations/config/env.yml + INSTALLATION_FILE: ((foundation))-installation-$timestamp.zip + - put: installation + params: + file: installation/((foundation))-installation*.zip + +- name: download-upload-and-stage-pks + serial: true + serial_groups: [ products ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + trigger: true + passed: [ "upgrade-opsman" ] + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: download-pks + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-pks.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: pks-product + downloaded-stemcell: pks-stemcell + - task: upload-and-stage-pks + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-and-stage-product.yml + input_mapping: + product: pks-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: upload-pks-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: configuration + stemcell: pks-stemcell + params: + ENV_FILE: foundations/config/env.yml + +- name: download-upload-and-stage-tas + serial: true + serial_groups: [ products ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + trigger: true + passed: [ "upgrade-opsman" ] + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: download-tas + image: platform-automation-image + 
file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-tas.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: tas-product + downloaded-stemcell: tas-stemcell + - task: upload-tas-product + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-product.yml + input_mapping: + product: tas-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: upload-tas-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: configuration + stemcell: tas-stemcell + params: + ENV_FILE: foundations/config/env.yml + - task: upload-and-stage-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/stage-product.yml + input_mapping: + product: tas-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml +- name: download-upload-and-stage-healthwatch + serial: true + serial_groups: [ products ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + trigger: true + passed: [ "upgrade-opsman" ] + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-healthwatch + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-healthwatch.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: healthwatch-product + downloaded-stemcell: healthwatch-stemcell + - task: upload-and-stage-healthwatch + image: platform-automation-image + file: 
platform-automation-tasks/tasks/upload-and-stage-product.yml + input_mapping: + product: healthwatch-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: upload-healthwatch-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: configuration + stemcell: healthwatch-stemcell + params: + ENV_FILE: foundations/config/env.yml + +- name: download-upload-and-stage-healthwatch-pas-exporter + serial: true + serial_groups: [ products ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + trigger: true + passed: [ "upgrade-opsman" ] + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-healthwatch + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-healthwatch-pas-exporter.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: healthwatch-pas-exporter + - task: upload-and-stage-healthwatch-pas-exporter + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-and-stage-product.yml + input_mapping: + product: healthwatch-pas-exporter + env: configuration + params: + ENV_FILE: foundations/config/env.yml + +- name: download-upload-and-stage-healthwatch-pks-exporter + serial: true + serial_groups: [ products ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + trigger: true + passed: [ "upgrade-opsman" ] + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-healthwatch + image: platform-automation-image + file: 
platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-healthwatch-pks-exporter.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: healthwatch-pks-exporter + - task: upload-and-stage-healthwatch-pks-exporter + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-and-stage-product.yml + input_mapping: + product: healthwatch-pks-exporter + env: configuration + params: + ENV_FILE: foundations/config/env.yml + +- name: configure-pks + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - download-upload-and-stage-pks + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: configure-pks + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/pks.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + vars/foundations/((foundation))/vars/pks.yml + +- name: configure-tas + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - download-upload-and-stage-tas + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: configure-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: 
configuration + env: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/tas.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/tas.yml + vars/foundations/((foundation))/vars/director.yml + +- name: configure-healthwatch + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - download-upload-and-stage-healthwatch + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: configure-healthwatch + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/healthwatch.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + +- name: configure-healthwatch-pas-exporter + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - download-upload-and-stage-healthwatch-pas-exporter + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: configure-healthwatch + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/healthwatch-pas-exporter.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + +- name: configure-healthwatch-pks-exporter + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: 
platform-automation-image + params: + unpack: true + passed: + - download-upload-and-stage-healthwatch-pks-exporter + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: configure-healthwatch + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/healthwatch-pks-exporter.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + +- name: apply-product-changes + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - configure-pks + - configure-tas + - configure-healthwatch + - configure-healthwatch-pas-exporter + - configure-healthwatch-pks-exporter + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: pre-deploy-check + image: platform-automation-image + file: platform-automation-tasks/tasks/pre-deploy-check.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: apply-product-changes + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: check-pending-changes + image: platform-automation-image + file: platform-automation-tasks/tasks/check-pending-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + ALLOW_PENDING_CHANGES: true + - task: export-installation + image: platform-automation-image + file: 
platform-automation-tasks/tasks/export-installation.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + INSTALLATION_FILE: ((foundation))-installation-$timestamp.zip + - put: installation + params: + file: installation/((foundation))-installation*.zip +- name: run-tas-smoketest-errand + serial: true + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - configure-pks + - configure-tas + - configure-healthwatch + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: run-bosh-errand + image: platform-automation-image + file: platform-automation-tasks/tasks/run-bosh-errand.yml + input_mapping: + env: configuration + params: + PRODUCT_NAME: cf + ERRAND_NAME: smoke_tests + ENV_FILE: foundations/config/env.yml + OPSMAN_SSH_PRIVATE_KEY: ((ops_manager_ssh_private_key)) +- name: collect-telemetry + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: telemetry-collector-binary + params: + unpack: true + - get: platform-automation-image + params: + unpack: true + passed: + - apply-product-changes + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: collect-telemetry-data + image: platform-automation-image + file: platform-automation-tasks/tasks/collect-telemetry.yml + input_mapping: + env: configuration + config: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/telemetry.yml + ENV_FILE: foundations/config/env.yml + - task: send-telemetry-data + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/send-telemetry.yml + params: + API_KEY: no-op-test-key + DATA_FILE_PATH: 
collected-telemetry-data/FoundationDetails*.tar +- name: expiring-certificates + serial: true + serial_groups: [ install ] + plan: + - in_parallel: + - get: daily-trigger + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - get: state + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: expiring-certificates + image: platform-automation-image + file: platform-automation-tasks/tasks/expiring-certificates.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + EXPIRES_WITHIN: 2m +- name: stage-configure-apply-telemetry + serial_groups: [install] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - apply-product-changes + trigger: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: prepare-image + <<: *prepare-image + - task: stage-configure-apply + image: platform-automation-image + file: platform-automation-tasks/tasks/stage-configure-apply.yml + attempts: 3 + params: + CONFIG_FILE: foundations/((foundation))/config/p-telemetry.yml + STAGE_PRODUCT_CONFIG_FILE: foundations/((foundation))/config/p-telemetry.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + input_mapping: + env: configuration + config: configuration + vars: configuration +- name: delete-installation + serial: true + serial_groups: [install] + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - get: state + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: delete-installation + image: platform-automation-image + file: 
platform-automation-tasks/tasks/delete-installation.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: delete-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/delete-vm.yml + input_mapping: + config: configuration + params: + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + STATE_FILE: state-((foundation)).yml + ensure: + do: + - put: state + params: + file: generated-state/state-((foundation)).yml +- name: create-root-ca + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: create-root-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-new-certificate-authority.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + +- name: apply-new-ca + serial: true + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - create-root-ca + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: pre-deploy-check + image: platform-automation-image + file: platform-automation-tasks/tasks/pre-deploy-check.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: apply-product-changes + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + SELECTIVE_DEPLOY_PRODUCTS: "cf,p-bosh,p-healthwatch2,p-healthwatch2-pas-exporter,pivotal-telemetry-om" + +- name: activate-new-ca-and-regenerate-certs + serial: true + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - 
apply-new-ca + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: activate-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/activate-certificate-authority.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: regenerate-certificates + image: platform-automation-image + file: platform-automation-tasks/tasks/regenerate-certificates.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + +- name: apply-certificates + serial: true + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - activate-new-ca-and-regenerate-certs + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: pre-deploy-check + image: platform-automation-image + file: platform-automation-tasks/tasks/pre-deploy-check.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: apply-product-changes + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + SELECTIVE_DEPLOY_PRODUCTS: "cf,p-bosh,p-healthwatch2,p-healthwatch2-pas-exporter,pivotal-telemetry-om" + +- name: cleanup-ca-certificate-authorities + serial: true + plan: + - in_parallel: + - get: platform-automation-image + params: + unpack: true + passed: + - apply-certificates + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: delete-certificate-authority + image: platform-automation-image + file: platform-automation-tasks/tasks/delete-certificate-authority.yml + input_mapping: + env: 
configuration + params: + ENV_FILE: foundations/config/env.yml + - task: pre-deploy-check + image: platform-automation-image + file: platform-automation-tasks/tasks/pre-deploy-check.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + - task: apply-product-changes + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + SELECTIVE_DEPLOY_PRODUCTS: "cf,p-bosh,p-healthwatch2,p-healthwatch2-pas-exporter,pivotal-telemetry-om" +``` + +[//]: # ({% with path="../" %}) +[//]: # ( {% include ".internal_link_url.md" %}) +[//]: # ({% endwith %}) +[//]: # ({% include ".external_link_url.md" %}) diff --git a/docs/pipelines/multiple-products.md b/docs/pipelines/multiple-products.md deleted file mode 100644 index bcfb81ea..00000000 --- a/docs/pipelines/multiple-products.md +++ /dev/null @@ -1,128 +0,0 @@ -Below you will find a reference pipeline that illustrates the tasks and provides an example of a basic pipeline design. You know your environment and constraints and we don't - we recommend you look at the tasks that make up the pipeline, and see how they can be arranged for your specific automation needs. For a deeper dive into each task see the Task Reference. - -These Concourse pipelines are examples on how to use the [tasks](../tasks.md). If you use a different CI/CD platform, you can use these Concourse files as examples of the inputs, outputs, and arguments used in each step in the workflow. - -## Prerequisites - -* Deployed Concourse - -!!! info - Platform Automation Toolkit is based on Concourse CI. - We recommend that you have some familiarity with Concourse before getting started. - If you are new to Concourse, [Concourse CI Tutorials][concourse-tutorial] would be a good place to start. - -* Persisted datastore that can be accessed by Concourse resource (e.g. 
s3, gcs, minio) -* A valid [generating-env-file][generating-env-file]: this file will contain credentials necessary to login to Ops Manager using the `om` CLI. -It is used by every task within Platform Automation Toolkit -* A valid [auth-file][auth-file]: this file will contain the credentials necessary to create the Ops Manager login the first time -the VM is created. The choices for this file are simple or saml authentication. - -!!! info - There will be some crossover between the auth file and the env file due to how om is setup and how the system works. It is highly recommended to parameterize these values, and let a credential management system (such as Credhub) fill in these values for you in order to maintain consistency across files. - -* An [opsman-configuration][opsman-config] file: This file is required to connect to an IAAS, and control the lifecycle management - of the Ops Manager VM -* A [director-configuration][director-configuration] file: Each Ops Manager needs its own configuration, but it is retrieved differently from -a product configuration. This config is used to deploy a new Ops Manager director, or update an existing one. -* A set of valid [product-configuration][product-configuration] files: Each product configuration is a yaml file that contains the properties -necessary to configure an Ops Manager product using the `om` tool. This can be used during install or update. -* (Optional) A working [credhub][credhub] setup with its own UAA client and secret. - - -!!! info "Retrieving products from Tanzu Network" - Please ensure products have been procured from Tanzu Network using the [reference-resources][reference-resources]. - -## Installing Ops Manager and multiple products - -The pipeline shows how to compose the tasks -to install Ops Manager and the Tanzu Application Service and Healthwatch products. -Its dependencies are coming from a trusted git repository, -which can be retrieved using [this pipeline][reference-resources]. 
- -### Full Pipeline and Reference Configurations - -There is a [git repository][ref-config-repo] -containing containing the [full pipeline file][ref-config-pipeline], -along with other pipeline and configuration examples. - -This can be useful when you want to take -a fully assembled pipeline as a starting point; -the rest of this document covers the sections of the full pipeline in more detail. - -## Pipeline Components - -### S3 Resources - -These can either be uploaded manually or from the [reference resources pipeline][reference-resources]. - ----excerpt--- "reference/reference-resources-s3" - -!!! tip "Tanzu Application Service-Windows with S3" - If retrieving `pas-windows` and `pas-windows-stemcell` from an S3 bucket, - you must use the built in S3 concourse resource. - This is done in the example above. - The `download-product` task with `SOURCE: s3` does not persist meta information - about necessary stemcell for `pas-windows` - because VMware does not distribute the Window's file system. - -Alternatively, products may be downloaded using the `download-product` task with -the param `SOURCE` set to `s3|azure|gcs`. -In a job, specify the following task: - -```yaml -... -- task: download-pas - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-product-configs/pas.yml - SOURCE: s3 - input_mapping: - config: interpolated-creds - output_mapping: - downloaded-product: pas-product - downloaded-stemcell: pas-stemcell -... -``` - -### Exported Installation Resource - -{% include "./.export_installation_note.md" %} - ----excerpt--- "reference/export-installation-resource-usage" - -### Configured Resources - -These contain values for -opsman VM creation, director, product, foundation-specific vars, auth, and env files. -For more details, see the [Inputs and Outputs][inputs-outputs] section. -Platform Automation Toolkit will not create these resources for you. 
- ----excerpt--- "reference/resources-configurations" - -### Trigger Resources - ----excerpt--- "reference/resources-triggers" - -### Secrets Handling - -This helps load secrets stored in an external credential manager -- such as Credhub. -Concourse support several [credential managers][concourse-secrets-handling] natively. - -The configuration below uses the [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task -to load secrets from your external configuration files. - ----excerpt--- "reference/prepare-tasks-with-secrets-usage" - -### Jobs - -Each job corresponds to a "box" -on the visual representation of your Concourse pipeline. -These jobs consume resources defined above. - ----excerpt--- "reference/reference-jobs" - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/pipelines/resources.html.md.erb b/docs/pipelines/resources.html.md.erb new file mode 100644 index 00000000..85a68a59 --- /dev/null +++ b/docs/pipelines/resources.html.md.erb @@ -0,0 +1,541 @@ +# Retrieving external dependencies + +In this topic, you will find a reference pipeline that illustrates the tasks and provides an example of a basic pipeline design. You know your environment and constraints; VMware recommend that you look at the tasks that make up the pipeline, and see how they can be arranged for your specific automation needs. For a deeper dive into each task see the [Task Reference](../tasks.html). + +These Concourse pipelines are examples on how to use the tasks. If you use a different CI/CD platform, you can use these Concourse files as examples of the inputs, outputs, and arguments used in each step in the workflow. + +## Prerequisites + +* Deployed Concourse + +

+ Platform Automation Toolkit is based on Concourse CI. + We recommend that you have some familiarity with Concourse before getting started. + If you are new to Concourse, see Installing Concourse with BOSH.

+ +* Persisted datastore that can be accessed by Concourse resource (for example, s3, gcs, minio) +* A set of valid [download-product-config](../pipelines/multiple-products.html#download-product-config) files: Each product has a configuration YAML of what version to download from the [Broadcom Support portal](https://support.broadcom.com/group/ecx/downloads). +* Broadcom Support portal access to [Platform Automation Toolkit](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Platform%20Automation%20Toolkit) + +## Retrieval from the Broadcom Support portal (formerly Tanzu Network) + +The pipeline downloads dependencies consumed by the tasks +and places them into a trusted s3-like storage provider. +This helps other Concourse deployments without internet access +retrieve task dependencies. + +The pipeline requires configuration for the [download-product](../tasks.html#download-product) task. +See the following for examples that you can use. + +

+Note the unique regex format for blob names, +for example: \[p-healthwatch,(.*)\]p-healthwatch-.*.pivotal. +The Broadcom Support portal file names will not always contain the necessary metadata +to accurately download files from a blobstore (for example, s3, gcs, azure), +so the product slug and version are prepended when using download-product. +For more information about how this works, +and what to expect when using download-product, +see the download-product task reference

+ +**Healthwatch** + +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-product-slug: p-healthwatch + +file-glob: "healthwatch-2*.pivotal" +product-version-regex: ^2\.1\..*$ +stemcell-iaas: google + +s3-access-key-id: ((s3_access_key_id)) +s3-secret-access-key: ((s3_secret_access_key)) +s3-bucket: ((s3_pivnet_products_bucket)) +s3-region-name: ((s3_region_name)) +s3-stemcell-path: healthwatch-stemcell +``` + +**Operations Manager** + +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-product-slug: ops-manager + +file-glob: "ops-manager-gcp*.yml" +product-version-regex: ^2\.10\.\d+$ #^2\.\d+\.\d+$|^2\.\d+\.\d+-rc.*$|^2\.\d+\.\d+-alpha.*$ + +s3-access-key-id: ((s3_access_key_id)) +s3-secret-access-key: ((s3_secret_access_key)) +s3-bucket: ((s3_pivnet_products_bucket)) +s3-region-name: ((s3_region_name)) +``` + +**PKS** + +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-product-slug: pivotal-container-service + +file-glob: "pivotal-container-service*.pivotal" +product-version-regex: ^1\.12\..*$ +stemcell-iaas: google + +s3-access-key-id: ((s3_access_key_id)) +s3-secret-access-key: ((s3_secret_access_key)) +s3-bucket: ((s3_pivnet_products_bucket)) +s3-region-name: ((s3_region_name)) +s3-stemcell-path: pks-stemcell +``` + +**Tanzu Platform for Cloud Foundry** + +```yaml +--- +pivnet-api-token: ((pivnet_token)) +pivnet-product-slug: elastic-runtime + +file-glob: "srt*.pivotal" +product-version-regex: ^2\.11\..*$ +stemcell-iaas: google + +s3-access-key-id: ((s3_access_key_id)) +s3-secret-access-key: ((s3_secret_access_key)) +s3-bucket: ((s3_pivnet_products_bucket)) +s3-region-name: ((s3_region_name)) +s3-stemcell-path: tas-stemcell +``` + +### Full Pipeline and Reference Configurations + +There is a git repository, [docs-platform-automation-reference-pipeline-config](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config), +containing containing the [full pipeline 
file](https://github.com/pivotal/docs-platform-automation-reference-pipeline-config/blob/develop/pipelines/download-products.yml), +along with other pipeline and configuration examples. + +This can be useful when you want to take +a fully assembled pipeline as a starting point; +the rest of this document covers the sections of the full pipeline in more detail. + +## Pipeline components + +### Resource types + +This custom resource type uses the [pivnet-resource](https://github.com/pivotal-cf/pivnet-resource) +to pull down and separate both pieces of the Platform Automation Toolkit product (tasks and image) +so they can be stored separately in S3. + +```yaml +resource_types: +- name: pivnet + type: docker-image + source: + repository: pivotalcf/pivnet-resource + tag: latest-final +``` + +### Product resources + +These are the S3 resources where Platform Automation Toolkit [`download-product`](../tasks.html#download-product) outputs will be stored. +Each product/stemcell needs a separate resource defined. +Platform Automation Toolkit will not create these resources for you. 
+ +```yaml +resources: +- name: opsman-product + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[ops-manager,(.*)\]ops-manager-gcp.*.yml + +- name: pks-product + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[pivotal-container-service,(.*)\]pivotal-container-service-.*.pivotal + +- name: pks-stemcell + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: pks-stemcell/\[stemcells-ubuntu-xenial,(.*)\]light-bosh-stemcell-.*-google.*\.tgz + +- name: tas-product + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[elastic-runtime,(.*)\]srt-.*.pivotal + +- name: tas-stemcell + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: tas-stemcell/\[stemcells-ubuntu-xenial,(.*)\]light-bosh-stemcell-.*-google.*\.tgz + +- name: healthwatch-product + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[p-healthwatch,(.*)\]healthwatch-\d.*.pivotal + +- name: healthwatch-pas-exporter + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[p-healthwatch,(.*)\]healthwatch-pas-exporter.*.pivotal + +- name: healthwatch-pks-exporter + type: s3 + 
source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[p-healthwatch,(.*)\]healthwatch-pks-exporter.*.pivotal + +- name: healthwatch-stemcell + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: healthwatch-stemcell/\[stemcells-ubuntu-xenial,(.*)\]light-bosh-stemcell-.*-google.*\.tgz + +- name: telemetry-product + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: \[pivotal-telemetry-collector,(.*)\]telemetry-collector-.*.tgz +``` + +### Platform Automation Toolkit resources + +`platform-automation-pivnet` is downloaded directly from the Broadcom Support portal +and will be used to download all other products from the Broadcom Support portal. + +`platform-automation-tasks` and `platform-automation-image` are S3 resources +that will be stored for internet-restricted, or faster, access. +Platform Automation Toolkit will not create this resource for you. 
+ +```yaml +- name: platform-automation-pivnet + type: pivnet + source: + api_token: ((pivnet_token)) + product_slug: platform-automation + product_version: 2\.(.*) + sort_by: semver + +- name: platform-automation-tasks + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: platform-automation-tasks-(.*).zip + +- name: platform-automation-image + type: s3 + source: + access_key_id: ((s3_access_key_id)) + bucket: ((s3_pivnet_products_bucket)) + region_name: ((s3_region_name)) + secret_access_key: ((s3_secret_access_key)) + regexp: platform-automation-image-(.*).tgz +``` + +### Configured resources + +You need to add your [`download-product` configuration](../inputs-outputs.html#download-product-config) files +to your configurations repo. +Platform Automation Toolkit will not create these resources for you. +For more details, see [Inputs and outputs](../inputs-outputs.html). + +```yaml +- name: configuration + type: git + source: + private_key: ((docs-ref-pipeline-repo-key.private_key)) + uri: ((docs-ref-pipeline-repo-uri)) + branch: develop + submodules: all + depth: 1 +``` + +### Trigger resources + +```yaml +- name: daily + type: time + source: + interval: 24h +``` + +### Secrets handling + +This example shows how to load secrets stored in an external credential manager such as CredHub. +Concourse supports several [credential managers](https://concourse-ci.org/creds.html) natively. + +The configuration below uses the [`prepare-tasks-with-secrets`](../tasks.html#prepare-tasks-with-secrets) task +to load secrets from your external configuration files. 
+ +```yaml +# This task is used in multiple jobs +# The YAML anchor "*prepare-tasks-with-secrets" is used in its place +prepare-tasks-with-secrets: &prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + params: + CONFIG_PATHS: config/download-product-pivnet + input_mapping: + tasks: platform-automation-tasks + config: configuration + output_mapping: + tasks: platform-automation-tasks +``` + +### Jobs + +Each job corresponds to a box on the visual representation of your Concourse pipeline. +These jobs consume the resources defined above. + +```yaml +jobs: +- name: fetch-opsman + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-opsman-image + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-opsman.yml + input_mapping: + config: configuration + - in_parallel: + - put: opsman-product + params: + file: downloaded-product/* +- name: fetch-pks + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-pks-product-and-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-pks.yml + input_mapping: + config: configuration + output_mapping: {downloaded-stemcell: pks-stemcell} + - in_parallel: + - put: pks-product + params: + file: downloaded-product/*.pivotal + - put: pks-stemcell + params: + file: pks-stemcell/*.tgz + +- name: 
fetch-tas + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-tas-product-and-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-tas.yml + input_mapping: + config: configuration + output_mapping: {downloaded-stemcell: tas-stemcell} + - in_parallel: + - put: tas-product + params: + file: downloaded-product/*.pivotal + - put: tas-stemcell + params: + file: tas-stemcell/*.tgz + +- name: fetch-healthwatch + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-healthwatch-product-and-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-healthwatch.yml + input_mapping: + config: configuration + output_mapping: {downloaded-stemcell: healthwatch-stemcell} + - in_parallel: + - put: healthwatch-product + params: + file: downloaded-product/*healthwatch-2*.pivotal + - put: healthwatch-stemcell + params: + file: healthwatch-stemcell/*.tgz + +- name: fetch-healthwatch-pas-exporter + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-healthwatch-pas-exporter-and-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + 
params: + CONFIG_FILE: download-product-pivnet/download-healthwatch-pas-exporter.yml + input_mapping: + config: configuration + - in_parallel: + - put: healthwatch-pas-exporter + params: + file: downloaded-product/*healthwatch-pas-exporter*.pivotal + +- name: fetch-healthwatch-pks-exporter + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-healthwatch-pks-exporter-and-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-healthwatch-pks-exporter.yml + input_mapping: + config: configuration + - in_parallel: + - put: healthwatch-pks-exporter + params: + file: downloaded-product/*healthwatch-pks-exporter*.pivotal + +- name: fetch-telemetry-collector + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-telemetry-product + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-telemetry.yml + input_mapping: + config: configuration + - in_parallel: + - put: telemetry-product + params: + file: downloaded-product/* + +- name: fetch-platform-automation + # We use the pivnet resource to bootstrap the pipeline, + # and because this product is part of the pipeline, not the foundation + plan: + - get: platform-automation-pivnet + trigger: true + - in_parallel: + - put: platform-automation-tasks + params: + file: platform-automation-pivnet/*tasks*.zip + - put: platform-automation-image + params: + file: 
platform-automation-pivnet/*image*.tgz +``` + +[//]: # ({% with path="../" %}) +[//]: # ( {% include ".internal_link_url.md" %}) +[//]: # ({% endwith %}) +[//]: # ({% include ".external_link_url.md" %}) diff --git a/docs/pipelines/resources.md b/docs/pipelines/resources.md deleted file mode 100644 index 30e34e2c..00000000 --- a/docs/pipelines/resources.md +++ /dev/null @@ -1,122 +0,0 @@ -Below you will find a reference pipeline that illustrates the tasks and provides an example of a basic pipeline design. You know your environment and constraints and we don't - we recommend you look at the tasks that make up the pipeline, and see how they can be arranged for your specific automation needs. For a deeper dive into each task see the Task Reference. - -These Concourse pipelines are examples on how to use the [tasks](../tasks.md). If you use a different CI/CD platform, you can use these Concourse files as examples of the inputs, outputs, and arguments used in each step in the workflow. - -## Prerequisites - -* Deployed Concourse - -!!! info - Platform Automation Toolkit is based on Concourse CI. - We recommend that you have some familiarity with Concourse before getting started. - If you are new to Concourse, [Concourse CI Tutorials][concourse-tutorial] would be a good place to start. - -* Persisted datastore that can be accessed by Concourse resource (e.g. s3, gcs, minio) -* A set of valid [download-product-config][download-product-config] files: Each product has a configuration YAML of what version to download from Tanzu Network. -* Tanzu Network access to [Platform Automation Toolkit][tanzu-network-platform-automation] - -## Retrieval from Tanzu Network - -{% include "./.opsman_filename_change_note.md" %} - -The pipeline downloads dependencies consumed by the tasks -and places them into a trusted s3-like storage provider. -This helps other concourse deployments without internet access -retrieve task dependencies. - -!!! 
tip "Blobstore filename prefixing" - Note the unique regex format for blob names, - for example: `\[p-healthwatch,(.*)\]p-healthwatch-.*.pivotal`. - Tanzu Network filenames will not always contain the necessary metadata - to accurately download files from a blobstore (i.e. s3, gcs, azure). - So, the product slug and version are prepended when using `download-product`. - For more information on how this works, - and what to expect when using `download-product`, - refer to the [`download-product` task reference.][download-product] - -The pipeline requires configuration for the [download-product](../tasks.md#download-product) task. -Below are examples that can be used. - -=== "Healthwatch" - ---excerpt--- "reference/download-healthwatch-from-pivnet-usage" -=== "Ops Manager" - ---excerpt--- "reference/download-ops-manager-from-pivnet-usage" -=== "PKS" - ---excerpt--- "reference/download-pks-from-pivnet-usage" -=== "TAS" - ---excerpt--- "reference/download-tas-from-pivnet-usage" - - -### Full Pipeline and Reference Configurations - -There is a [git repository][ref-config-repo] -containing containing the [full pipeline file][ref-config-resource-pipeline], -along with other pipeline and configuration examples. - -This can be useful when you want to take -a fully assembled pipeline as a starting point; -the rest of this document covers the sections of the full pipeline in more detail. - -## Pipeline Components - -### Resource Types - -This custom resource type uses the [pivnet-resource][pivnet-resource] -to pull down and separate both pieces of the Platform Automation Toolkit product (tasks and image) -so they can be stored separately in S3. - ----excerpt--- "reference/resources-pipeline-resource-types" - -### Product Resources - -S3 resources where Platform Automation Toolkit [`download-product`][download-product] outputs will be stored. -Each product/stemcell needs a separate resource defined. -Platform Automation Toolkit will not create these resources for you. 
- ----excerpt--- "reference/resources-pipeline-products" - -### Platform Automation Toolkit Resources - -`platform-automation-pivnet` is downloaded directly from Tanzu Network -and will be used to download all other products from Tanzu Network. - -`platform-automation-tasks` and `platform-automation-image` are S3 resources -that will be stored for internet-restricted, or faster, access. -Platform Automation Toolkit will not create this resource for you. - ----excerpt--- "reference/resources-pipeline-platform-automation" - -### Configured Resources - -You will need to add your [`download-product` configuration][download-product-config] configuration files -to your configurations repo. -Platform Automation Toolkit will not create these resources for you. -For more details, see the [Inputs and Outputs][inputs-outputs] section. - ----excerpt--- "reference/resources-pipeline-configurations" - -### Trigger Resources - ----excerpt--- "reference/resources-pipeline-triggers" - -### Secrets Handling - -This helps load secrets stored in an external credential manager -- such as Credhub. -Concourse supports several [credential managers][concourse-secrets-handling] natively. - -The configuration below uses the [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task -to load secrets from your external configuration files. - ----excerpt--- "reference/resources-pipeline-prepare-tasks-with-secrets" - -### Jobs - -Each job corresponds to a "box" on the visual representation of your Concourse pipeline. -These jobs consume resources defined above. 
- ----excerpt--- "reference/resources-pipeline-jobs" - -{% with path="../" %} - {% include ".internal_link_url.md" %} -{% endwith %} -{% include ".external_link_url.md" %} diff --git a/docs/r-release-notes.html.md.erb b/docs/r-release-notes.html.md.erb new file mode 100644 index 00000000..75bd3f3e --- /dev/null +++ b/docs/r-release-notes.html.md.erb @@ -0,0 +1,51 @@ +# Release Notes + +These are the release notes for Platform Automation Toolkit for VMware Tanzu. + +## v5.2.1 +July 26, 2024 + +**CLI Versions** + +| Name | version | +|---|---| +| aws-cli | [1.33.30](https://github.com/aws/aws-cli/releases/tag/1.33.30) | +| azure-cli | [2.62.0](https://github.com/Azure/azure-cli/releases/tag/azure-cli-2.62.0) | +| bbr-cli | [1.9.66](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.66) | +| bosh-cli | [v7.6.2](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.6.2) | +| credhub | [2.9.35](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.35) | +| gcloud-cli | 485.0.0 | +| govc-cli | [0.39.0](https://github.com/vmware/govmomi/releases/tag/v0.39.0) | +| om | [7.13.0](https://github.com/pivotal-cf/om/releases/tag/7.13.0) | +| winfs-injector | [0.26.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.26.0) | + +The full Docker image-receipt: Download + +### What's New + +* `om` has been updated to 7.13.0 for bug fixes and dependency updates. 
+* Adds `FORCE_LATEST_VARIABLES` param to apply changes tasks by @selzoc in #52 + + +## v5.2.0 +June 28, 2024 + +**CLI Versions** + +| Name | version | +|---|---| +| aws-cli | [1.33.13](https://github.com/aws/aws-cli/releases/tag/1.33.13) | +| azure-cli | [2.61.0](https://github.com/Azure/azure-cli/releases/tag/azure-cli-2.61.0) | +| bbr-cli | [1.9.66](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.66) | +| bosh-cli | [v7.6.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.6.1) | +| credhub | [2.9.33](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.33) | +| gcloud-cli | 481.0.0 | +| govc-cli | [0.37.3](https://github.com/vmware/govmomi/releases/tag/v0.37.3) | +| om | [7.12.0](https://github.com/pivotal-cf/om/releases/tag/7.12.0) | +| winfs-injector | [0.25.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.25.0) | + +The full Docker image-receipt: Download + +### What's New + +* `om` has been updated to 7.12.0 in order to have the Auto-Accept EULA functionality for TanzuNet depreciation. diff --git a/docs/release-notes.html.md.erb b/docs/release-notes.html.md.erb new file mode 100644 index 00000000..ae838604 --- /dev/null +++ b/docs/release-notes.html.md.erb @@ -0,0 +1,256 @@ +# Release Notes for Platform Automation Toolkit + +These are the release notes for Platform Automation Toolkit for VMware Tanzu. + +

+Tanzu Application Service is now called Tanzu Platform for Cloud Foundry. +The current version of Tanzu Platform for Cloud Foundry is 10.0.

+ + +## v5.2.3 + +March 26, 2025 + + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.38.18 | +| azure-cli | 2.70.0 | +| bbr-cli | [1.9.74](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.74) | +| bosh-cli | [v7.9.4](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.9.4) | +| credhub | [2.9.44](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.44) | +| gcloud-cli | 515.0.0 | +| govc-cli | 0.49.0 | +| om | [7.15.0](https://github.com/pivotal-cf/om/releases/tag/7.15.0) | +| winfs-injector | [0.26.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.26.0) | + +The full Docker image-receipt: Download + +### What's New + +- Added support for specifying a stemcell slug when downloading a product through the `om download-product` command + +## v5.2.2 + +September 18, 2024 + + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.34.21 | +| azure-cli | 2.64.0 | +| bbr-cli | [1.9.69](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.69) | +| bosh-cli | [v7.7.2](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.7.2) | +| credhub | [2.9.37](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.37) | +| gcloud-cli | 493.0.0 | +| govc-cli | 0.43.0 | +| om | [7.14.0](https://github.com/pivotal-cf/om/releases/tag/7.14.0) | +| winfs-injector | [0.26.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.26.0) | + +The full Docker image-receipt: Download + +### Bug Fixes + +- Update tasks to use user supplied env var prefix + + +## v5.2.1 +July 26, 2024 + + +## v5.1.2 + +June 15, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.27.153 | +| azure-cli | 2.49.0 | +| bbr-cli | [1.9.46](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.46) | +| 
bosh-cli | [v7.2.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.2.3) | +| credhub | [2.9.16](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.16) | +| gcloud-cli | 435.0.1 | +| govc-cli | 0.30.4 | +| om | 45876ef5954ddb419cd88126d77b4e8ebb2ca554-2023-05-03T17:38:11-07:00 | +| winfs-injector | [0.22.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.22.0) | + +The full Docker image-receipt: Download + +### Maintenance Release + +- Update Platform Automation Toolkit to use Ubuntu Jammy-based Paketo buildpack images + + +## v5.1.1 + +May 5, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.24.10 | +| azure-cli | 2.39.0 | +| bbr-cli | [1.9.44](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.44) | +| bosh-cli | [v7.2.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.2.3) | +| credhub | [2.9.14](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.14) | +| gcloud-cli | 429.0.0 | +| govc-cli | 0.30.4 | +| om | 45876ef5954ddb419cd88126d77b4e8ebb2ca554-2023-05-03T17:38:11-07:00 | +| winfs-injector | [0.22.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.22.0) | + +The full Docker image-receipt: Download + +### Bug Fixes + +- Update govc to v0.30.4 +- Delete all python files from vsphere-only docker image + + +## v5.1.0 + +February 27, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.24.10 | +| azure-cli | 2.39.0 | +| bbr-cli | [1.9.38](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.38) | +| bosh-cli | [v7.1.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.1.3) | +| credhub | [2.9.11](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.11) | +| gcloud-cli | 419.0.0 | +| govc-cli | 0.30.2 | +| om | 
2ba733630d765e1b41e815ce1b49e825da2c192b-2023-02-24T11:33:19-07:00 | +| winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | + +The full Docker image-receipt: Download + +### What's New + +- Added a new How-to Guide about [Rotating Certificate Authority][rotating-certificate-authority]. + This how-to guide shows you how to write a pipeline for rotating the certificate authority on an existing Tanzu Operations Manager. +- The following additional tasks have been added to help with rotating certificate authorities: + * [`activate-certificate-authority`][activate-certificate-authority] + * [`configure-new-certificate-authority`][configure-new-certificate-authority] + * [`delete-certificate-authority`][delete-certificate-authority] + * [`generate-certificate`][generate-certificate] + * [`regenerate-certificates`][regenerate-certificates] + + +## v5.0.26 + +June 25, 2024 + + +## v5.0.25 + +June 15, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.27.153 | +| azure-cli | 2.49.0 | +| bbr-cli | [1.9.46](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.46) | +| bosh-cli | [v7.2.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.2.3) | +| credhub | [2.9.16](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.16) | +| gcloud-cli | 435.0.1 | +| govc-cli | 0.30.4 | +| om | 45876ef5954ddb419cd88126d77b4e8ebb2ca554-2023-05-03T17:38:11-07:00 | +| winfs-injector | [0.22.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.22.0) | + +The full Docker image-receipt: Download + +### Maintenance Release + +- Update Platform Automation Toolkit to use Ubuntu Jammy-based Paketo buildpack images + + +## v5.0.24 + +May 5, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.24.10 | +| azure-cli | 2.39.0 | +| bbr-cli | 
[1.9.44](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.44) | +| bosh-cli | [v7.2.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.2.3) | +| credhub | [2.9.14](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.14) | +| gcloud-cli | 429.0.0 | +| govc-cli | 0.30.4 | +| om | 45876ef5954ddb419cd88126d77b4e8ebb2ca554-2023-05-03T17:38:11-07:00 | +| winfs-injector | [0.22.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.22.0) | + +The full Docker image-receipt: Download + +### Bug Fixes + +- Update govc to v0.30.4 +- Delete all python files from vsphere-only docker image + + +## v4.4.33 + +June 25, 2024 + +## v4.4.32 + +June 15, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.27.153 | +| azure-cli | 2.49.0 | +| bbr-cli | [1.9.46](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.46) | +| bosh-cli | [v7.2.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.2.3) | +| credhub | [2.9.16](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.16) | +| gcloud-cli | 435.0.1 | +| govc-cli | 0.30.4 | +| om | 45876ef5954ddb419cd88126d77b4e8ebb2ca554-2023-05-03T17:38:11-07:00 | +| winfs-injector | [0.22.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.22.0) | + +The full Docker image-receipt: Download + +### Maintenance Release + +- Update Platform Automation Toolkit to use Ubuntu Jammy-based Paketo buildpack images + + +## v4.4.31 + +May 5, 2023 + +### CLI Versions + +| Name | version | +|---|---| +| aws-cli | 1.24.10 | +| azure-cli | 2.39.0 | +| bbr-cli | [1.9.44](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.44) | +| bosh-cli | [v7.2.3](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.2.3) | +| credhub | 
[2.9.14](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.14) | +| gcloud-cli | 429.0.0 | +| govc-cli | 0.30.4 | +| om | 45876ef5954ddb419cd88126d77b4e8ebb2ca554-2023-05-03T17:38:11-07:00 | +| winfs-injector | [0.22.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.22.0) | + +The full Docker image-receipt: Download + +### Bug Fixes + +- Update govc to v0.30.4 +- Delete all python files from vsphere-only docker image diff --git a/docs/release-notes.md b/docs/release-notes.md deleted file mode 100644 index 28d400bc..00000000 --- a/docs/release-notes.md +++ /dev/null @@ -1,4411 +0,0 @@ - - -{% include "./.opsman_filename_change_note.md" %} - -!!! warning "Azure Updating to 2.5" - Ops Manager will be removing the necessity to provide availability zones for azure. - If your `director.yml`(see [`staged-director-config`][staged-director-config]) - has a block like the following in the networks section: - ```yaml - availability_zone_names: - - "null" - ``` - your deployment will have the following error: - ```json - {"errors":["Availability zones cannot find availability zone with name null"]} - ``` - To fix this error, please remove the `availability_zone_names` section from your azure config, or re-run - [`staged-director-config`][staged-director-config] to update your `director.yml`. - -## v5.0.23 -January 4, 2023 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.24.10 | - | azure-cli | 2.39.0 | - | bbr-cli | [1.9.38](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.38) | - | bosh-cli | [v7.1.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.1.0) | - | credhub | [2.9.9](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.9) | - | gcloud-cli | 412.0.0 | - | govc-cli | 0.30.0 | - | om | 694a983454bf38737eb32bf348a6e54099c5618d-2022-10-24T10:50:20-06:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- bump bundled iso-replicator binary to 0.13.0, compiled with Golang 1.19.4 - - -## v5.0.22 -October 24, 2022 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.24.10 | - | azure-cli | 2.39.0 | - | bbr-cli | [1.9.38](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.38) | - | bosh-cli | [v7.0.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.0.1) | - | credhub | [2.9.6](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.6) | - | gcloud-cli | 406.0.0 | - | govc-cli | 0.29.0 | - | om | 694a983454bf38737eb32bf348a6e54099c5618d-2022-10-24T10:50:20-06:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes - Bump versions of included binaries - - -## v5.0.21 -March 21, 2022 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.22.77 | - | azure-cli | 2.34.1 | - | bbr-cli | [1.9.26](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.26) | - | bosh-cli | [v6.4.17](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.17) | - | credhub | [2.9.3](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.3) | - | gcloud-cli | 377.0.0 | - | govc-cli | 0.27.4 | - | om | 2aeff1d15cfe3e192567098afc107d718110b33f-2022-03-07T14:07:08-05:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Bump versions of included binaries - - -## v5.0.20 -January 5, 2022 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.22.28 | - | azure-cli | 2.32.0 | - | bbr-cli | [1.9.21](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.21) | - | bosh-cli | [v6.4.10](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.10) | - | credhub | [2.9.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.1) | - | gcloud-cli | 367.0.0 | - | govc-cli | 0.27.2 | - | om | 87f12ad07a994a2946c2a04303d98dc589e67744-2021-11-30T14:26:51-08:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. -Resolves [USN-5199-1](https://ubuntu.com/security/notices/USN-5199-1), -an issue related to python3.6. -- CVE update to container image. -Resolves [USN-5189-1](https://ubuntu.com/security/notices/USN-5189-1), -an issue related to glib2.0. - - -## v5.0.19 -November 11, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.22.3 | - | azure-cli | 2.30.0 | - | bbr-cli | [1.9.18](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.18) | - | bosh-cli | [v6.4.7](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.7) | - | credhub | [2.9.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.1) | - | gcloud-cli | 364.0.0 | - | govc-cli | 0.27.1 | - | om | a9865819e957ebd1512c9fb1af41ab4a4ff0e834-2021-11-11T06:57:05-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. - Resolves [USN 5133-1](https://ubuntu.com/security/notices/USN-5133-1), - an issue related to ICU crashing - - -## v5.0.18 -September 23, 2021 - -### Bug Fixes -- Fixed an issue on the `tasks/configure-opsman.sh`, which had a line that printed `om` help messages. -- CVE update to container image. - Resolves [USN 5089-1](https://ubuntu.com/security/notices/USN-5089-1), - an issue related to expiring ca certificate. -- CVE update to container image. - Resolves [USN 5079-3](https://ubuntu.com/security/notices/USN-5079-3), - an issue related to curl. -- CVE update to container image. - Resolves [USN-5080-1](https://ubuntu.com/security/notices/USN-5080-1), - an issue related to libgcrypt. -- CVE update to container image. - Resolves [USN-5079-1](https://ubuntu.com/security/notices/USN-5079-1), - an issue related to curl. -- CVE update to container image. - Resolves [USN-5076-1](https://ubuntu.com/security/notices/USN-5076-1), - an issue related to git. -- CVE update to container image. - Resolves [USN-5051-3](https://ubuntu.com/security/notices/USN-5051-3), - an issue related to OpenSSL. -- CVE update to container image. 
- Resolves [USN-5051-1](https://ubuntu.com/security/notices/USN-5051-1), - an issue related to OpenSSL. -- CVE update to container image. - Resolves [USN-3809-2](https://ubuntu.com/security/notices/USN-3809-2), - an issue related to OpenSSH. - -## v5.0.17 -August 2, 2021 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.20.11 | - | azure-cli | 2.26.1 | - | bbr-cli | [1.9.11](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.11) | - | bosh-cli | [v6.4.4](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.4) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 350.0.0 | - | govc-cli | 0.26.0 | - | om | c9895b73b2b111a24b7c4ae787a7603d7f8a2723-2021-08-01T20:26:31-06:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `om` CLI has been explicitly requesting `opaque` tokens. - This was an unintentional and incidental change; - previously it used `jwt`, - the token format UAA provides by default. - The `opaque` token may have been contributing to - a hard-to-reproduce issue in a customer environment, - so we're explicitly switching back to `jwt` tokens. -- CVE update to container image. - Resolves [USN 5021-1](https://ubuntu.com/security/notices/USN-5021-1), - an issue related to libcurl. -- CVE update to container image. - Resolves [USN 4991-1](https://ubuntu.com/security/notices/USN-4991-1), - an issue related to libxml2. - - -## v5.0.16 -June 17, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.19.96 | - | azure-cli | 2.25.0 | - | bbr-cli | [1.9.7](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.7) | - | bosh-cli | [v6.4.4](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.4) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 345.0.0 | - | govc-cli | 0.26.0 | - | om | f0370bb68d212b136c1d673684c36bd57173665c-2021-06-17T08:25:19-06:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Features -- When creating an Ops Manager VM on Vsphere, the disk size can be set via the configuration file to sizes larger than the default of 160 (GB). - - ```yaml - --- - opsman-configuration: - vsphere: - disk_size: 200 - vm_name: ops-manager-vm - cpu: 4 - memory: 16 - disk_type: thin - dns: 8.8.8.8 - gateway: 192.168.10.1 - hostname: ops-manager.example.com - netmask: 255.255.255.192 - network: example-virtual-network - ntp: ntp.ubuntu.com - private_ip: 10.0.0.10 - ssh_public_key: ssh-rsa ...... - vcenter: - ca_cert: cert - datacenter: example-dc - datastore: example-ds-1 - folder: /example-dc/vm/Folder - url: vcenter.example.com - username: ((vcenter-username)) - password: ((vcenter-password)) - resource_pool: /example-dc/host/example-cluster/Resources/example-pool - ``` - -- When creating or updating an Ops Manager VM on Azure, you can set an optional tags argument for creating tags on the Ops Manager VM. 
- - ```yaml - --- - opsman-configuration: - azure: - tags: Key=Value - vm_name: ops-manager-vm - boot_disk_size: 200 - tenant_id: 3e52862f-a01e-4b97-98d5-f31a409df682 - subscription_id: 90f35f10-ea9e-4e80-aac4-d6778b995532 - client_id: 5782deb6-9195-4827-83ae-a13fda90aa0d - client_secret: ((opsman-client-secret)) - location: westus - resource_group: res-group - storage_account: opsman - ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAZ... - subnet_id: /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/ - private_ip: 10.0.0.3 - ``` - -### Bug Fixes -- CVE update to container image. Resolves [USN-4891-1](https://ubuntu.com/security/notices/USN-4891-1). The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4968-1](https://ubuntu.com/security/notices/USN-4968-1). The CVEs are related to vulnerabilities with `liblz4-1`. -- CVE update to container image. Resolves [USN-4906-1](https://ubuntu.com/security/notices/USN-4906-1) and [USN-4990-1](https://ubuntu.com/security/notices/USN-4990-1). The CVEs are related to vulnerabilities with `libnettle6`. -- CVE update to container image. Resolves [USN-4898-1](https://ubuntu.com/security/notices/USN-4898-1). The CVEs are related to vulnerabilities with `curl` and related libraries. -- CVE update to container image. Resolves [USN-4764-1](https://ubuntu.com/security/notices/USN-4764-1). The CVEs are related to vulnerabilities with `libglib2.0-0`. -- CVE update to container image. Resolves [USN-4761-1](https://ubuntu.com/security/notices/USN-4761-1). The CVEs are related to vulnerabilities with `git`. - - -## v5.0.15 -March 9, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.19.23 | - | azure-cli | 2.20.0 | - | bbr-cli | [1.9.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 330.0.0 | - | govc-cli | v0.24.0 | - | om | 6516c1a327f7bb7ede88c857e5b4d0d58f27f5bc-2021-01-26T10:53:54-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4738-1](https://ubuntu.com/security/notices/USN-4738-1). The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4754-1](https://ubuntu.com/security/notices/USN-4754-1). The CVEs are related to vulnerabilities with `python` and related libraries. -- CVE update to container image. Resolves [USN-4759-1](https://ubuntu.com/security/notices/USN-4759-1). The CVEs are related to vulnerabilities with `libglib2.0-0`. -- CVE update to container image. Resolves [USN-4760-1](https://ubuntu.com/security/notices/USN-4760-1). The CVEs are related to vulnerabilities with `libzstd` and related libraries. - - -## v5.0.14 -February 25, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.19.7 | - | azure-cli | 2.19.1 | - | bbr-cli | [1.9.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 327.0.0 | - | govc-cli | v0.24.0 | - | om | 6516c1a327f7bb7ede88c857e5b4d0d58f27f5bc-2021-01-26T10:53:54-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4719-1](https://ubuntu.com/security/notices/USN-4719-1). - - -## v5.0.13 -January 15, 2021 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.215 | - | azure-cli | 2.17.1 | - | bbr-cli | [1.9.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 323.0.0 | - | govc-cli | v0.24.0 | - | om | 39fd21c57e46588e76bb07826a2d8809e29382e9-2021-01-12T08:23:20-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Use [`pip` documented](https://pip.pypa.io/en/stable/installing/) method for installing it on the container image -- With new GCP practices, defining a [`hostname`](https://cloud.google.com/compute/docs/instances/custom-hostname-vm) on the VM can be required. - When creating an Ops Manager VM on GCP, the attribute can be set via the configuration file. 
- - ```yaml - --- - opsman-configuration: - gcp: - boot_disk_size: 100 - custom_cpu: 4 - custom_memory: 16 - gcp_service_account: ((service_account_key)) - project: ((project)) - public_ip: ((ops_manager_public_ip)) - region: ((region)) - ssh_public_key: ((ops_manager_ssh_public_key)) - tags: ((ops_manager_tags)) - vm_name: ((environment_name))-ops-manager-vm - vpc_subnet: ((management_subnet_name)) - zone: ((availability_zones.0)) - hostname: testing.some.domain - ``` - -## v5.0.12 -January 5, 2021 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.208 | - | azure-cli | 2.17.1 | - | bbr-cli | [1.9.0](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.0) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 321.0.0 | - | govc-cli | v0.24.0 | - | om | dc7ecb856d9d6e8a5538512922e688bd337ab246-2021-01-04T09:19:13-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - -### Bug Fixes -- CVE update to container image. Resolves [USN-4672-1](https://ubuntu.com/security/notices/USN-4672-1). - The CVEs are related to vulnerabilities with `unzip` and related libraries. -- CVE update to container image. Resolves [USN-4667-1](https://ubuntu.com/security/notices/USN-4667-1). - The CVEs are related to vulnerabilities with `apt` and related libraries. -- CVE update to container image. Resolves [USN-4665-1](https://ubuntu.com/security/notices/USN-4665-1). - The CVEs are related to vulnerabilities with `curl` and related libraries. -- CVE update to container image. Resolves [USN-4662-1](https://ubuntu.com/security/notices/USN-4662-1). - The CVEs are related to vulnerabilities with `libssl` and related libraries. -- CVE update to container image. 
Resolves [USN-4677-1](https://ubuntu.com/security/notices/USN-4677-1). - The CVEs are related to vulnerabilities with `p11` and related libraries. - - -## v5.0.11 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.189 | - | azure-cli | 2.15.1 | - | bbr-cli | [1.8.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.8.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 319.0.0 | - | govc-cli | 0.23.0 | - | om | acfd93675b65c8ca8a7f584cd53796d09e5fc88b-2020-12-03T07:19:52-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - -### Features -- [`configure-opsman`][configure-opsman] task can now configure the UAA token expirations and timeouts. - - ```yaml - tokens-expiration: - access_token_expiration: 10 - refresh_token_expiration: 10 - session_idle_timeout: 10 - ``` - -### Bug Fixes -- [`update-runtime-config`][update-runtime-config] task has the `releases` input as optional. - When looking for `releases`, if the input wasn't there then the task would fail. - A check has been added to ensure the input is there. -- With long-running tasks (using `om` commands), - sometimes the authentication token would expire. - If possible the token will be refreshed. - This should help with HTTP retries. -- CVE update to container image. Resolves [USN-4608-1](https://ubuntu.com/security/notices/USN-4608-1). - The CVEs are related to vulnerabilities with `ca-certificates` and related libraries. -- CVE update to container image. Resolves [USN-4635-1](https://ubuntu.com/security/notices/USN-4635-1). - The CVEs are related to vulnerabilities with `krb5` and related libraries. - -## v5.0.10 -November 24, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.184 | - | azure-cli | 2.15.1 | - | bbr-cli | [1.8.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.8.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 319.0.0 | - | govc-cli | 0.23.0 | - | om | 3a7d703bacb004220450d5984b01a5ea6ebe5087-2020-11-23T14:34:44-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - -### Bug Fixes -- Fixes an issue where `stemcell-heavy` in `download-product` had a regression with boolean values. - -## v5.0.9 -November 19, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.180 | - | azure-cli | 2.15.0 | - | bbr-cli | [1.8.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.8.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 319.0.0 | - | govc-cli | 0.23.0 | - | om | d424cdfe525f3d9ec4f7b6995c160d7b80c4a6f1-2020-11-18T09:51:49-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `backup` scripts did not have the correct command line options to compress the tarball. - It is still a valid tarball, just not compressed. - This has been fixed by changing `tar -cvf` to `tar -zcvf`. -- [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] could not be used with other tasks. - For example, if you had written custom tasks. - The `TASK_PATH` was added, so custom paths of tasks could be prepared, too. 
-- [`stage-configure-apply`][stage-configure-apply] was missing the ability to configure errands. - We've added the `ERRAND_CONFIG_FILE` parameter. -- [`stage-configure-apply`][stage-configure-apply] was using the incorrect path relative to the input. - It was using `config` instead of `assign-stemcell-config`. - -## v5.0.7 -October 23, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.163 | - | azure-cli | 2.13.0 | - | bbr-cli | 1.8.0 | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 315.0.0 | - | govc-cli | 0.23.0 | - | om | [6.4.2](https://github.com/pivotal-cf/om/releases/tag/6.4.2) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4581-1](https://usn.ubuntu.com/4581-1/). - The CVEs are related to vulnerabilities with `python3.6` and related libraries. - This affects the all IAAS container images only. -- CVE update to container image. Resolves [USN-4601-1](https://usn.ubuntu.com/4601-1/). - The CVEs are related to vulnerabilities with `python3-pip` and related libraries. - This affects the all IAAS container images only. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. 
- - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.6 -October 14, 2020 - -!!! note "Updates to CLI versions" - We've now updated our list of CLI versions. - It includes the supported IAAS CLIs. - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.157 | - | azure-cli | 2.13.0 | - | bosh-cli | [6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 314.0.0 | - | govc-cli | 0.23.0 | - | om | [6.4.1](https://github.com/pivotal-cf/om/releases/tag/6.4.1) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Do to a release packaging issue, - the previous patch version had issues invoking `openstack` CLI. - This release ensures this has been fixed. - We've also added further release testing to ensure it doesn't happen again. -- [`download-and-upload-product`][download-and-upload-product] did not upload the stemcell as expected. - The task now properly uploads the stemcell if `stemcell-iaas` - is provided in the config file. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. 
- - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.5 -October 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.4.1](https://github.com/pivotal-cf/om/releases/tag/6.4.1) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The "bug fixes" for collections in `om` 6.1.2+ - were causing unexpected issues in some tiles. - The collection work has been reverted - to its original functionality. -- [`pending-changes`][check-pending-changes] and [`stage-configure-apply`][stage-configure-apply] - would always fail if a product is unconfigured, new, or missing a stemcell, - regardless of whether `ALLOW_PENDING_CHANGES` was set. - This has been fixed. `pending-changes` will only fail if `ALLOW_PENDING_CHANGES: false`. -- [`stage-product`][stage-product] and [`stage-configure-apply`][stage-configure-apply] - will now accept `latest` as the `product-version` - if you are providing a `CONFIG_FILE`/`STAGE_PRODUCT_CONFIG_FILE`. - This fixes an issue that required users to update their config file - every time a new version was available on Ops Manager. -- [`stage-configure-apply`][stage-configure-apply] will now treat the `product` input - as truly optional if `CONFIG_FILE` is provided. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. 
- The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. - - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.4 -Released October 2, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The [`download-and-upload-product`][download-and-upload-product] task did not provide the option to leave the stemcell floating. - A new param (`FLOATING_STEMCELL`) allows floating for a stemcell to be set. - Its default value is set to `true`. - The expectation is to affect one product, not all products. -- The [`backup-tkgi`][backup-tkgi] task had a misnamed param. - `$deployment_name` has been renamed to `$DEPLOYMENT_NAME` as was necessary. -- CVE update to container image. Resolves [USN-4512-1](https://usn.ubuntu.com/4512-1/). -The CVEs are related to vulnerabilities with `util-linux` and related libraries. -- CVE update to container image. Resolves [USN-4504-1](https://usn.ubuntu.com/4504-1/). -The CVEs are related to vulnerabilities with `libssl` and related libraries. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. 
- When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. - - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.3 -September 15, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- There was an issue with how `om` retrieved credentials - within the new heuristic logic for collections updates. - In particular, this impacted the upgrade from TAS 2.6 to 2.7, - and caused `configure-product` to fail. - The error message looked something like this: - - ```bash - 2020/09/01 06:35:54 could not execute "configure-product": failed to configure product: failed to associate guids for property ".properties.credhub_internal_provider_keys" because: - request failed: unexpected response from /api/v0/deployed/products/cf-6bdb4038d37667f9f424/credentials/.properties.credhub_internal_provider_keys[0].key: - HTTP/1.1 404 Not Found - ...more HTTP headers... - ``` - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. 
- When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. - - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.2 -September 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.2.0](https://github.com/pivotal-cf/om/releases/tag/6.2.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Releasing the vSphere only-image, sorry about that in v5.0.1. -- Bump the CLIs for `om`, `credhub`, and `winfs-injector`. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. - - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.1 -Released September 4, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -!!! warning "Removed from Releases" - There was a regression introduced in v5.0.1. - It will be removed from Tanzu Network. - Please use v5.0.2 instead. - -### Bug Fixes -- tl;dr: If you have experienced the following error with the [`create-vm`][create-vm] task this is fixed. - - ```bash - creating the new opsman vm - Using gcp... - Error: unexpected error: could not marshal image file: yaml: unmarshal errors: - line 6: cannot unmarshal !!map into string - ``` - - With GCP OpsManager, the image YAML file format includes a new key. - - The original format of the image YAML was: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - ``` - - The new format includes the `image` key: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - image: - name: ops-manager-2-9-10-build-177 - project: pivotal-ops-manager-images - ``` - - This patch ignores this value, where previously it would not have been able to parse it. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (through jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. 
- The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. - - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v5.0.0 -Released September 2, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.2.0](https://github.com/pivotal-cf/om/releases/tag/6.2.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Breaking Changes -- Platform Automation will now require Concourse 5.0+ - -- There's an additional docker image for vSphere only. - Most of our users are on vSphere, - and excluding other IaaS-specific resources for the image greatly reduces - file size and security surface area. - The original image continues to work with all IaaSs, including vSphere, - but if you use our product on vSphere, - we recommend switching over to the new image. - The new image is named: - `vsphere-platform-automation-image-5.0.0.tar.gz`. - Note that the filename starts with `vsphere-` - and uses the alternate file extension `.tar.gz` - instead of `.tgz`. - This is to avoid breaking existing globs and patterns. - See the following (API Declaration Change) for more information. 
- - If you're getting our image with the Pivnet resource - as documented in the How-to guides, - the new `get` configuration would look like this: - - ```yaml - - get: platform-automation-image - resource: platform-automation - params: - globs: ["vsphere-platform-automation-image-*.tar.gz"] - unpack: true - ``` - -- Change to API Declaration Notice: - - As of 5.0 we are considering the patterns necessary to specify our files - on Tanzu Network part of our API. - Specifically, we will consider it a breaking change - if any of the following glob patterns for the Platform Automation Toolkit image and tasks - fail to return a single match - when used with the `pivnet-resource` and/or `download-product` task: - - - `platform-automation-image-*.tgz` # all IaaSes image - - `vsphere-platform-automation-image-*.tar.gz` # vSphere only image - - `platform-automation-tasks-*.zip` # tasks - - -- The deprecated `download-product-s3` task has been removed. - For the same functionality, please use [`download-product`][download-product] - and specify the `s3` `source`. - -- The [`download-product`][download-product] task - will no longer copy files to the existing outputs. - Rather, these files will be written directly. - This speeds up `download-product` in general, - especially in systems where space IO might be a constraint. - - This change _*requires*_ Concourse 5.0+. - If using an older version of Concourse, this task will error. - -### What's New -- The [`download-and-upload-product`][download-and-upload-product] task has been added. - This advanced task optimizes the steps of downloading and uploading a product file to an Ops Manager. - Before downloading, Ops Manager is checked to see if the product/stemcell has been uploaded already. - If it has, the download and upload steps are skipped. - There are no outputs on this task. - At the moment, this task only supports downloading from Tanzu Network (Pivotal Network). 
-- The [`backup-product`][backup-product] and [`backup-director`][backup-director] tasks have been added. - These tasks use [BOSH Backup and Restore][bbr] - to backup artifacts which can be used to restore your director and products. - Note, there is no task to automate restoring from a backup. - Restore cannot be guaranteed to be idempotent, and therefore cannot be safely automated. - See the [BBR docs][bbr-restore] for information on restoring from a backup. -- The [`backup-tkgi`][backup-tkgi] task has been added. - This task is specific to the Tanzu Kubernetes Grid Integrated Edition(TKGI) product. - It will backup the tile _and_ the TKGI clusters. - - To persist this backup to a blobstore, the blobstore resource can match the following regexes: - - - For TKGI tile: `product_*.tgz` - - For the TKGI clusters: `*_clusters_*.tgz` - - !!! info "PKS CLI may be Temporarily Unavailable" - During `backup-tkgi`, the PKS CLI is disabled. - Due to the nature of the backup, some commands may not work as expected. - -- [`apply-changes`][apply-changes] now supports the optional input `ERRAND_CONFIG_FILE`. - If provided, `apply-changes` can enable/disable an errand for a particular run. - To retrieve the default configuration of your product's errands, - `om staged-config` can be used. - The expected format for this errand config is as follows: - - ```yaml - errands: - sample-product-1: - run_post_deploy: - smoke_tests: default - push-app: false - run_pre_delete: - smoke_tests: true - sample-product-2: - run_post_deploy: - smoke_tests: default - ``` - -- [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] will now inject a params block - if one is not already present in the task. -- [`stage-configure-apply`][stage-configure-apply] now offers the ability to optionally upload and/or assign a stemcell. - To upload a stemcell, provide a `stemcell` input as you would for [`upload-stemcell`][upload-stemcell]. 
- To assign a stemcell, provide an `assign-stemcell-config` input - (this can be the same as your normal config, but must be mapped to this name in your `pipeline.yml`). - - If you wish to upload a stemcell, there are two new (optional) `params`:
- - `FLOATING_STEMCELL`: - this is equivalent to the `FLOATING_STEMCELL` param of [`upload-stemcell`][upload-stemcell].
- - `UPLOAD_STEMCELL_CONFIG_FILE`: - this is equivalent to the `CONFIG_FILE` param of [`upload-stemcell`][upload-stemcell].
- - If you wish to assign a specific stemcell to the staged product, - you need to provide the `assign-stemcell-config` input - and define the `ASSIGN_STEMCELL_CONFIG_FILE` param. - This param is equivalent to the `CONFIG_FILE` param of [`assign-stemcell`][assign-stemcell]. - -- [`run-bosh-errand`][run-bosh-errand] task has been added. - This task runs a specified BOSH errand directly on the BOSH director - by tunneling through the Ops Manager. - As such, any errand run in this way does not have visibility within the Ops Manager. - *Please note this is an advanced feature, and should be used at your own discretion.* - -### Deprecation Notices -- In future _major_ versions of Platform Automation, the [`credhub-interpolate`][credhub-interpolate] task will be removed. - Please use the [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task in its place. - -### Known Issues -- When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. - - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -## v4.4.30 -January 4, 2023 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.24.10 | - | azure-cli | 2.39.0 | - | bbr-cli | [1.9.38](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.38) | - | bosh-cli | [v7.1.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.1.0) | - | credhub | [2.9.9](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.9) | - | gcloud-cli | 412.0.0 | - | govc-cli | 0.30.0 | - | om | 694a983454bf38737eb32bf348a6e54099c5618d-2022-10-24T10:50:20-06:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- bump bundled iso-replicator binary to 0.13.0, compiled with Golang 1.19.4 - - -## v4.4.29 -October 24, 2022 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.24.10 | - | azure-cli | 2.39.0 | - | bbr-cli | [1.9.38](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.38) | - | bosh-cli | [v7.0.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v7.0.1) | - | credhub | [2.9.6](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.6) | - | gcloud-cli | 406.0.0 | - | govc-cli | 0.29.0 | - | om | 694a983454bf38737eb32bf348a6e54099c5618d-2022-10-24T10:50:20-06:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes - Bump versions of included binaries - - -## v4.4.28 -March 21, 2022 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.22.77 | - | azure-cli | 2.34.1 | - | bbr-cli | [1.9.26](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.26) | - | bosh-cli | [v6.4.17](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.17) | - | credhub | [2.9.3](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.3) | - | gcloud-cli | 377.0.0 | - | govc-cli | 0.27.4 | - | om | 2aeff1d15cfe3e192567098afc107d718110b33f-2022-03-07T14:07:08-05:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Bump versions of included binaries - - -## v4.4.27 -January 5, 2022 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.22.28 | - | azure-cli | 2.32.0 | - | bbr-cli | [1.9.21](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.21) | - | bosh-cli | [v6.4.10](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.10) | - | credhub | [2.9.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.1) | - | gcloud-cli | 367.0.0 | - | govc-cli | 0.27.2 | - | om | 87f12ad07a994a2946c2a04303d98dc589e67744-2021-11-30T14:26:51-08:00 | - | winfs-injector | [0.21.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.21.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. -Resolves [USN-5199-1](https://ubuntu.com/security/notices/USN-5199-1), -an issue related to python3.6. -- CVE update to container image. -Resolves [USN-5189-1](https://ubuntu.com/security/notices/USN-5189-1), -an issue related to glib2.0. - - -## v4.4.26 -November 11, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.22.3 | - | azure-cli | 2.30.0 | - | bbr-cli | [1.9.18](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.18) | - | bosh-cli | [v6.4.7](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.7) | - | credhub | [2.9.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.1) | - | gcloud-cli | 364.0.0 | - | govc-cli | 0.27.1 | - | om | a9865819e957ebd1512c9fb1af41ab4a4ff0e834-2021-11-11T06:57:05-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. - Resolves [USN 5133-1](https://ubuntu.com/security/notices/USN-5133-1), - an issue related to ICU crashing - - -## v4.4.25 -September 28, 2021 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.20.48 | - | azure-cli | 2.28.1 | - | bbr-cli | [1.9.15](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.15) | - | bosh-cli | [v6.4.7](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.7) | - | credhub | [2.9.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.1) | - | gcloud-cli | 358.0.0 | - | govc-cli | 0.26.1 | - | om | 347d10298c51ce2db4c5f775e60531f1729b14fd-2021-09-20T15:22:38-06:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes - -- Fixed an issue on the `tasks/configure-opsman.sh`, which had a line that printed `om` help messages. -- CVE update to container image. - Resolves [USN 5089-1](https://ubuntu.com/security/notices/USN-5089-1), - an issue related to expiring ca certificate. -- CVE update to container image. 
- Resolves [USN 5079-3](https://ubuntu.com/security/notices/USN-5079-3), - an issue related to curl. -- CVE update to container image. - Resolves [USN-5080-1](https://ubuntu.com/security/notices/USN-5080-1), - an issue related to libgcrypt. -- CVE update to container image. - Resolves [USN-5079-1](https://ubuntu.com/security/notices/USN-5079-1), - an issue related to curl. -- CVE update to container image. - Resolves [USN-5076-1](https://ubuntu.com/security/notices/USN-5076-1), - an issue related to git. -- CVE update to container image. - Resolves [USN-5051-3](https://ubuntu.com/security/notices/USN-5051-3), - an issue related to OpenSSL. -- CVE update to container image. - Resolves [USN-5051-1](https://ubuntu.com/security/notices/USN-5051-1), - an issue related to OpenSSL. -- CVE update to container image. - Resolves [USN-3809-2](https://ubuntu.com/security/notices/USN-3809-2), - an issue related to OpenSSH. - - -## v4.4.24 -September 23, 2021 - -### Bug Fixes -- Fixed an issue on the `tasks/configure-opsman.sh`, which had a line that printed `om` help messages. -- CVE update to container image. - Resolves [USN 5089-1](https://ubuntu.com/security/notices/USN-5089-1), - an issue related to expiring ca certificate. -- CVE update to container image. - Resolves [USN 5079-3](https://ubuntu.com/security/notices/USN-5079-3), - an issue related to curl. -- CVE update to container image. - Resolves [USN-5080-1](https://ubuntu.com/security/notices/USN-5080-1), - an issue related to libgcrypt. -- CVE update to container image. - Resolves [USN-5079-1](https://ubuntu.com/security/notices/USN-5079-1), - an issue related to curl. -- CVE update to container image. - Resolves [USN-5076-1](https://ubuntu.com/security/notices/USN-5076-1), - an issue related to git. -- CVE update to container image. - Resolves [USN-5051-3](https://ubuntu.com/security/notices/USN-5051-3), - an issue related to OpenSSL. -- CVE update to container image. 
- Resolves [USN-5051-1](https://ubuntu.com/security/notices/USN-5051-1), - an issue related to OpenSSL. -- CVE update to container image. - Resolves [USN-3809-2](https://ubuntu.com/security/notices/USN-3809-2), - an issue related to OpenSSH. - -## v4.4.23 -August 2, 2021 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.20.11 | - | azure-cli | 2.26.1 | - | bbr-cli | [1.9.11](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.11) | - | bosh-cli | [v6.4.4](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.4) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 350.0.0 | - | govc-cli | 0.26.0 | - | om | c9895b73b2b111a24b7c4ae787a7603d7f8a2723-2021-08-01T20:26:31-06:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `om` CLI has been explicitly requesting `opaque` tokens. - This was an unintentional and incidental change; - previously it used `jwt`, - the token format UAA provides by default. - The `opaque` token may have been contributing to - a hard-to-reproduce issue in a customer environment, - so we're explicitly switching back to `jwt` tokens. -- CVE update to container image. - Resolves [USN 5021-1](https://ubuntu.com/security/notices/USN-5021-1), - an issue related to libcurl. -- CVE update to container image. - Resolves [USN 4991-1](https://ubuntu.com/security/notices/USN-4991-1), - an issue related to libxml2. - - -## v4.4.22 -June 17, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.19.96 | - | azure-cli | 2.25.0 | - | bbr-cli | [1.9.7](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.7) | - | bosh-cli | [v6.4.4](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.4) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 345.0.0 | - | govc-cli | 0.26.0 | - | om | f0370bb68d212b136c1d673684c36bd57173665c-2021-06-17T08:25:19-06:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4891-1](https://ubuntu.com/security/notices/USN-4891-1). The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4968-1](https://ubuntu.com/security/notices/USN-4968-1). The CVEs are related to vulnerabilities with `liblz4-1`. -- CVE update to container image. Resolves [USN-4906-1](https://ubuntu.com/security/notices/USN-4906-1) and [USN-4990-1](https://ubuntu.com/security/notices/USN-4990-1). The CVEs are related to vulnerabilities with `libnettle6`. -- CVE update to container image. Resolves [USN-4898-1](https://ubuntu.com/security/notices/USN-4898-1). The CVEs are related to vulnerabilities with `curl` and related libraries. -- CVE update to container image. Resolves [USN-4764-1](https://ubuntu.com/security/notices/USN-4764-1). The CVEs are related to vulnerabilities with `libglib2.0-0`. -- CVE update to container image. Resolves [USN-4761-1](https://ubuntu.com/security/notices/USN-4761-1). The CVEs are related to vulnerabilities with `git`. - - -## v4.4.21 -March 9, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.19.23 | - | azure-cli | 2.20.0 | - | bbr-cli | [1.9.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 330.0.0 | - | govc-cli | v0.24.0 | - | om | 6516c1a327f7bb7ede88c857e5b4d0d58f27f5bc-2021-01-26T10:53:54-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4738-1](https://ubuntu.com/security/notices/USN-4738-1). The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4754-1](https://ubuntu.com/security/notices/USN-4754-1). The CVEs are related to vulnerabilities with `python` and related libraries. -- CVE update to container image. Resolves [USN-4759-1](https://ubuntu.com/security/notices/USN-4759-1). The CVEs are related to vulnerabilities with `libglib2.0-0`. -- CVE update to container image. Resolves [USN-4760-1](https://ubuntu.com/security/notices/USN-4760-1). The CVEs are related to vulnerabilities with `libzstd` and related libraries. - - -## v4.4.20 -February 25, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.19.7 | - | azure-cli | 2.19.1 | - | bbr-cli | [1.9.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 327.0.0 | - | govc-cli | v0.24.0 | - | om | 6516c1a327f7bb7ede88c857e5b4d0d58f27f5bc-2021-01-26T10:53:54-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4719-1](https://ubuntu.com/security/notices/USN-4719-1). - - -## v4.4.19 -January 15, 2021 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.215 | - | azure-cli | 2.17.1 | - | bbr-cli | [1.9.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 323.0.0 | - | govc-cli | v0.24.0 | - | om | 39fd21c57e46588e76bb07826a2d8809e29382e9-2021-01-12T08:23:20-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Use [`pip` documented](https://pip.pypa.io/en/stable/installing/) method for installing it on the container image - - -## v4.4.18 -January 5, 2021 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.208 | - | azure-cli | 2.17.1 | - | bbr-cli | [1.9.0](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.9.0) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.9.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.9.0) | - | gcloud-cli | 321.0.0 | - | govc-cli | v0.24.0 | - | om | dc7ecb856d9d6e8a5538512922e688bd337ab246-2021-01-04T09:19:13-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - -### Bug Fixes -- CVE update to container image. Resolves [USN-4672-1](https://ubuntu.com/security/notices/USN-4672-1). - The CVEs are related to vulnerabilities with `unzip` and related libraries. -- CVE update to container image. Resolves [USN-4667-1](https://ubuntu.com/security/notices/USN-4667-1). - The CVEs are related to vulnerabilities with `apt` and related libraries. -- CVE update to container image. Resolves [USN-4665-1](https://ubuntu.com/security/notices/USN-4665-1). - The CVEs are related to vulnerabilities with `curl` and related libraries. -- CVE update to container image. Resolves [USN-4662-1](https://ubuntu.com/security/notices/USN-4662-1). - The CVEs are related to vulnerabilities with `libssl` and related libraries. -- CVE update to container image. Resolves [USN-4677-1](https://ubuntu.com/security/notices/USN-4677-1). - The CVEs are related to vulnerabilities with `p11` and related libraries. - - -## v4.4.17 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.189 | - | azure-cli | 2.15.1 | - | bbr-cli | [1.8.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.8.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 319.0.0 | - | govc-cli | 0.23.0 | - | om | acfd93675b65c8ca8a7f584cd53796d09e5fc88b-2020-12-03T07:19:52-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - -### Features -- [`configure-opsman`][configure-opsman] task can now configure the UAA token expirations and timeouts. - - ```yaml - tokens-expiration: - access_token_expiration: 10 - refresh_token_expiration: 10 - session_idle_timeout: 10 - ``` - -### Bug Fixes -- [`update-runtime-config`][update-runtime-config] task has the `releases` input as optional. - When looking for `releases`, if the input wasn't there then the task would fail. - A check has been added to ensure the input is there. -- With long-running tasks (using `om` commands), - sometimes the authentication token would expire. - If possible the token will be refreshed. - This should help with HTTP retries. -- CVE update to container image. Resolves [USN-4608-1](https://ubuntu.com/security/notices/USN-4608-1). - The CVEs are related to vulnerabilities with `ca-certificates` and related libraries. -- CVE update to container image. Resolves [USN-4635-1](https://ubuntu.com/security/notices/USN-4635-1). - The CVEs are related to vulnerabilities with `krb5` and related libraries. - -## v4.4.16 -November 24, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.184 | - | azure-cli | 2.15.1 | - | bbr-cli | [1.8.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.8.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 319.0.0 | - | govc-cli | 0.23.0 | - | om | 3a7d703bacb004220450d5984b01a5ea6ebe5087-2020-11-23T14:34:44-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - -### Bug Fixes -- Fixes an issue where `stemcell-heavy` in `download-product` had a regression with boolean values. - -## v4.4.15 -November 19, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.180 | - | azure-cli | 2.15.0 | - | bbr-cli | [1.8.1](https://github.com/cloudfoundry-incubator/bosh-backup-and-restore/releases/tag/v1.8.1) | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 319.0.0 | - | govc-cli | 0.23.0 | - | om | d424cdfe525f3d9ec4f7b6995c160d7b80c4a6f1-2020-11-18T09:51:49-07:00 | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] could not be used with other tasks. - For example, if you had written custom tasks. - The `TASK_PATH` was added, so custom paths of tasks could be prepared, too. - -## v4.4.13 -October 23, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.163 | - | azure-cli | 2.13.0 | - | bbr-cli | 1.8.0 | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 315.0.0 | - | govc-cli | 0.23.0 | - | om | [6.4.2](https://github.com/pivotal-cf/om/releases/tag/6.4.2) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4581-1](https://usn.ubuntu.com/4581-1/). - The CVEs are related to vulnerabilities with `python3.6` and related libraries. - This affects the all IAAS container images only. -- CVE update to container image. Resolves [USN-4601-1](https://usn.ubuntu.com/4601-1/). - The CVEs are related to vulnerabilities with `python3-pip` and related libraries. - This affects the all IAAS container images only. - -## v4.4.12 -October 14, 2020 - -!!! note "Updates to CLI versions" - We've now updated our list of CLI versions. - It includes the supported IAAS CLIs. - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.157 | - | azure-cli | 2.13.0 | - | bosh-cli | [6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 314.0.0 | - | govc-cli | 0.23.0 | - | om | [6.4.1](https://github.com/pivotal-cf/om/releases/tag/6.4.1) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Do to a release packaging issue, - the previous patch version had issues invoking `openstack` CLI. 
- This release ensures this has been fixed. - We've also added further release testing to ensure it doesn't happen again. - - - -## v4.4.11 -October 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.4.1](https://github.com/pivotal-cf/om/releases/tag/6.4.1) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The "bug fixes" for collections in `om` 6.1.2+ - were causing unexpected issues in some tiles. - The collection work has been reverted - to its original functionality. -- [`pending-changes`][check-pending-changes] and [`stage-configure-apply`][stage-configure-apply] - would always fail if a product is unconfigured, new, or missing a stemcell, - regardless of whether `ALLOW_PENDING_CHANGES` was set. - This has been fixed. `pending-changes` will only fail if `ALLOW_PENDING_CHANGES: false`. -- [`stage-product`][stage-product] and [`stage-configure-apply`][stage-configure-apply] - will now accept `latest` as the `product-version` - if you are providing a `CONFIG_FILE`/`STAGE_PRODUCT_CONFIG_FILE`. - This fixes an issue that required users to update their config file - every time a new version was available on Ops Manager. -- [`stage-configure-apply`][stage-configure-apply] will now treat the `product` input - as truly optional if `CONFIG_FILE` is provided. - -## v4.4.10 -October 2, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes - - CVE update to container image. Resolves [USN-4512-1](https://usn.ubuntu.com/4512-1/). - The CVEs are related to vulnerabilities with `util-linux` and related libraries. - - CVE update to container image. Resolves [USN-4504-1](https://usn.ubuntu.com/4504-1/). - The CVEs are related to vulnerabilities with `libssl` and related libraries. - - -## v4.4.9 -September 15, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- There was an issue with how `om` retrieved credentials - within the new heuristic logic for collections updates. - In particular, this impacted the upgrade from TAS 2.6 to 2.7, - and caused `configure-product` to fail. 
- The error message looked something like this: - - ```bash - 2020/09/01 06:35:54 could not execute "configure-product": failed to configure product: failed to associate guids for property ".properties.credhub_internal_provider_keys" because: - request failed: unexpected response from /api/v0/deployed/products/cf-6bdb4038d37667f9f424/credentials/.properties.credhub_internal_provider_keys[0].key: - HTTP/1.1 404 Not Found - ...more HTTP headers... - ``` - -## v4.4.8 -September 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.2.0](https://github.com/pivotal-cf/om/releases/tag/6.2.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Bump the CLIs for `om`, `credhub`, and `winfs-injector`. - -## v4.4.7 -Released September 4, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- tl;dr: If you have experienced the following error with the [`create-vm`][create-vm] task this is fixed. - - ```bash - creating the new opsman vm - Using gcp... 
- Error: unexpected error: could not marshal image file: yaml: unmarshal errors: - line 6: cannot unmarshal !!map into string - ``` - - With GCP OpsManager, the image YAML file format includes a new key. - - The original format of the image YAML was: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - ``` - - The new format includes the `image` key: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - image: - name: ops-manager-2-9-10-build-177 - project: pivotal-ops-manager-images - ``` - - This patch ignores this value, where previously it would've not been able to parse it. - -- The container image has been fixed to support the `registry-image` Concourse resource -- With [`credhub-interpolate`][credhub-interpolate] task, - users were using secrets as a way to interpolate the same Credhub value to multiple vars values. - This allowed not having ot repeat the same value in Credhub for each var value. - Support has been added to the [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] workflow to support secrets evaluation. - - For example, given a `config.yml`, - - ```yaml - product-name: some - product-properties: - email-password: ((email-password)) - ssh-password: ((ssh-password)) - ``` - - And given a `vars.yml`, - - ```yaml - email-password: ((password)) - ssh-password: ((password)) - ``` - - Each task will now fully evaluate the parameter `((password))` as a value from the Concourse configured secret manager. - This fixes the issue where `((password))` would have been the actual _string_ value for - `email-password` and `ssh-password`. - -## v4.4.6 -Released August 20, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- `configure-product` will no longer assign a new GUID for unnamed collections. - This means that for some tiles, - configure-product will now avoid unnecessary changes to collections. -- `download-product` will work with supported versions of TAS Windows - released after Friday August 20th, 2020. - These versions do not work with older versions of Platform Automation. - The TAS Windows tiles on Tanzu Network now include Open Source License files - in the tile itself. - Platform Automation needed to bump the winfs-injector version - to ensure compatibility with this new arrangement. -- CVE updates to container image. Resolves [USN-4466-1](https://ubuntu.com/security/notices/USN-4466-1) - The CVE is related to vulnerabilities in curl and libcurl. - -## v4.4.5 -Released July 30, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] will now inject a params block - into the passed in task if it is missing. 
-- CVE update to container image. Resolves [USN-4416-1](https://usn.ubuntu.com/4416-1/). - The CVEs are related to vulnerabilities with `libc6` and related libraries. -- CVE update to container image. Resolves [USN-4428-1](https://usn.ubuntu.com/4428-1/). - The CVEs are related to vulnerabilities with `python2.7`, `python2.7-minimal`, `python3.5`, `python3.5-minimal` and related libraries. - -## v4.4.4 -Released July 10, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [5.0.0](https://github.com/pivotal-cf/om/releases/tag/5.0.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- When using [`update-runtime-config`][update-runtime-config] task, - we've added the `param` for `RELEASES_GLOB` to help limit the releases being uploaded. - This is especially useful when using the bosh-io-release concourse resource, - which has other files besides the `release.tgz` when it peforms a `get`. -- CVE update to container image. Resolves [USN-4402-1](https://usn.ubuntu.com/4402-1/). - The CVEs are related to vulnerabilities with `curl` and related libraries. - -## v4.4.3 -Released June 16, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- When using [`create-vm`][create-vm], AWS did not support tagging the VM. - This has been added to the [AWS opsman config][opsman-config] - - Tags can be added to the config file in two formats: - - ```yaml - tags: {key: value} - ``` - - or - - ```yaml - tags: - - key: value - - key2: value - ``` - -- CVE update to container image. Resolves [USN-4394-1](https://usn.ubuntu.com/4394-1/). - The CVEs are related to vulnerabilities with `libsqlite`.4. - -## v4.4.2 -Released June 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4377-1](https://usn.ubuntu.com/4377-1/). - The CVEs are related to vulnerabilities with `ca-certificates`. - -## v4.4.1 -Released June 4, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### What's New -- The [`stage-product`][stage-product] and [`stage-configure-apply`][stage-configure-apply] tasks - have been updated to no longer require a `product` input. - - This change allows tiles to be staged without requiring the product file to be passed to these tasks. - If the `product` input is not provided, - the `CONFIG_FILE` and `STAGE_PRODUCT_CONFIG_FILE` params **are required** in their appropriate tasks. - -- [`upgrade-opsman`][upgrade-opsman] now supports configuring settings - on the Ops Manager Settings page in the UI. - This utilizes the `configure-opsman` command from `om`, - and runs after the upgrade command. - Configuration can be added directly to [`opsman.yml`][inputs-outputs-configure-opsman]. - An example of all configurable properties can be found in the "Additional Settings" tab. - -- [`configure-opsman`][configure-opsman] task has been added. - - This task supports configuring settings - on the Ops Manager Settings page in the UI. - - Configuration can be added directly to [`opsman.yml`][inputs-outputs-configure-opsman]. - An example of all configurable properties can be found in the "Additional Settings" tab. - -- [`download-product`][download-product] now supports - specifying a version in the config file for the stemcell - if the latest stemcell for the product is not desired. 
- - An example config for downloading a product from Tanzu Network: - - ```yaml - # download-product.yml - --- - pivnet-api-token: token - pivnet-file-glob: "*.pivotal" - pivnet-product-slug: product-slug - product-version: 1.2.3 - stemcell-iaas: aws - stemcell-version: 90.90 - ``` - -- The [`prepare-image`][prepare-image] task has been added. - - This task allows you to temporarily inject a CA onto the Platform Automation image - for use in subsequent tasks within the same job. - - This updated image _does not need to be persisted_, - and can be used directly by subsequent tasks with no other changes to `pipeline.yml`. - - The task allows proper ssl validation when using `om` commands. - To fully take advantage of this feature, remove `skip-ssl-validation: true` from your `env.yml`. - - For an example of how this fits into a `pipeline.yml`, check out the [Ops Manager + Multiple Products pipeline][reference-pipeline] - -- The [`replicate-product`][replicate-product] task has been added. - This task requires a downloaded product, - and will output the replicated product for isolation segments. - - Supported products: `p-isolation-segment`, `p-windows-runtime`, `pas-windows` - -- The [`update-runtime-config`][update-runtime-config] task has been added. - *Please note this is an advanced feature, and should be used at your own discretion.* - -## v4.3.21 -October 23, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.163 | - | azure-cli | 2.13.0 | - | bbr-cli | 1.8.0 | - | bosh-cli | [v6.4.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.1) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 315.0.0 | - | govc-cli | 0.23.0 | - | om | [6.4.2](https://github.com/pivotal-cf/om/releases/tag/6.4.2) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4581-1](https://usn.ubuntu.com/4581-1/). - The CVEs are related to vulnerabilities with `python3.6` and related libraries. - This affects the all IAAS container images only. -- CVE update to container image. Resolves [USN-4601-1](https://usn.ubuntu.com/4601-1/). - The CVEs are related to vulnerabilities with `python3-pip` and related libraries. - This affects the all IAAS container images only. - - -## v4.3.20 -October 14, 2020 - -!!! note "Updates to CLI versions" - We've now updated our list of CLI versions. - It includes the supported IAAS CLIs. - -??? info "CLI Versions" - - | Name | version | - |---|---| - | aws-cli | 1.18.157 | - | azure-cli | 2.13.0 | - | bosh-cli | [6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | gcloud-cli | 314.0.0 | - | govc-cli | 0.23.0 | - | om | [6.4.1](https://github.com/pivotal-cf/om/releases/tag/6.4.1) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Do to a release packaging issue, - the previous patch version had issues invoking `openstack` CLI. 
- This release ensures this has been fixed. - We've also added further release testing to ensure it doesn't happen again. - - - -## v4.3.19 -October 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.4.1](https://github.com/pivotal-cf/om/releases/tag/6.4.1) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The "bug fixes" for collections in `om` 6.1.2+ - were causing unexpected issues in some tiles. - The collection work has been reverted - to its original functionality. -- [`pending-changes`][check-pending-changes] and [`stage-configure-apply`][stage-configure-apply] - would always fail if a product is unconfigured, new, or missing a stemcell, - regardless of whether `ALLOW_PENDING_CHANGES` was set. - This has been fixed. `pending-changes` will only fail if `ALLOW_PENDING_CHANGES: false`. - - -## v4.3.18 -October 2, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes - - CVE update to container image. Resolves [USN-4512-1](https://usn.ubuntu.com/4512-1/). - The CVEs are related to vulnerabilities with `util-linux` and related libraries. - - CVE update to container image. Resolves [USN-4504-1](https://usn.ubuntu.com/4504-1/). 
- The CVEs are related to vulnerabilities with `libssl` and related libraries. - - -## v4.3.17 -September 15, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- There was an issue with how `om` retrieved credentials - within the new heuristic logic for collections updates. - In particular, this impacted the upgrade from TAS 2.6 to 2.7, - and caused `configure-product` to fail. - The error message looked something like this: - - ```bash - 2020/09/01 06:35:54 could not execute "configure-product": failed to configure product: failed to associate guids for property ".properties.credhub_internal_provider_keys" because: - request failed: unexpected response from /api/v0/deployed/products/cf-6bdb4038d37667f9f424/credentials/.properties.credhub_internal_provider_keys[0].key: - HTTP/1.1 404 Not Found - ...more HTTP headers... - ``` - -## v4.3.16 -September 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.2.0](https://github.com/pivotal-cf/om/releases/tag/6.2.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Bump the CLIs for `om`, `credhub`, and `winfs-injector`. - -## v4.3.15 -Released September 4, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- tl;dr: If you have experienced the following error with the [`create-vm`][create-vm] task this is fixed. - - ```bash - creating the new opsman vm - Using gcp... - Error: unexpected error: could not marshal image file: yaml: unmarshal errors: - line 6: cannot unmarshal !!map into string - ``` - - With GCP OpsManager, the image YAML file format includes a new key. - - The original format of the image YAML was: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - ``` - - The new format includes the `image` key: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - image: - name: ops-manager-2-9-10-build-177 - project: pivotal-ops-manager-images - ``` - - This patch ignores this value, where previously it would've not been able to parse it. - -- The container image has been fixed to support the `registry-image` Concourse resource -- With [`credhub-interpolate`][credhub-interpolate] task, - users were using secrets as a way to interpolate the same Credhub value to multiple vars values. - This allowed not having ot repeat the same value in Credhub for each var value. - Support has been added to the [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] workflow to support secrets evaluation. 
- - For example, given a `config.yml`, - - ```yaml - product-name: some - product-properties: - email-password: ((email-password)) - ssh-password: ((ssh-password)) - ``` - - And given a `vars.yml`, - - ```yaml - email-password: ((password)) - ssh-password: ((password)) - ``` - - Each task will now fully evaluate the parameter `((password))` as a value from the Concourse configured secret manager. - This fixes the issue where `((password))` would have been the actual _string_ value for - `email-password` and `ssh-password`. - -## v4.3.14 -Released August 20, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- `configure-product` will no longer assign a new GUID for unnamed collections. - This means that for some tiles, - configure-product will now avoid unnecessary changes to collections. -- `download-product` will work with supported versions of TAS Windows - released after Friday August 20th, 2020. - These versions do not work with older versions of Platform Automation. - The TAS Windows tiles on Tanzu Network now include Open Source License files - in the tile itself. - Platform Automation needed to bump the winfs-injector version - to ensure compatibility with this new arrangement. -- CVE updates to container image. Resolves [USN-4466-1](https://ubuntu.com/security/notices/USN-4466-1) - The CVE is related to vulnerabilities in curl and libcurl. - -## v4.3.13 -Released July 30, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] will now inject a params block - into the passed in task if it is missing. -- CVE update to container image. Resolves [USN-4416-1](https://usn.ubuntu.com/4416-1/). - The CVEs are related to vulnerabilities with `libc6` and related libraries. -- CVE update to container image. Resolves [USN-4428-1](https://usn.ubuntu.com/4428-1/). - The CVEs are related to vulnerabilities with `python2.7`, `python2.7-minimal`, `python3.5`, `python3.5-minimal` and related libraries. - -## v4.3.12 -Released July 10, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [5.0.0](https://github.com/pivotal-cf/om/releases/tag/5.0.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4402-1](https://usn.ubuntu.com/4402-1/). - The CVEs are related to vulnerabilities with `curl` and related libraries. - -## v4.3.11 -Released June 15, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4394-1](https://usn.ubuntu.com/4394-1/). - The CVEs are related to vulnerabilities with `libsqlite`. - -## v4.3.10 -Released June 8, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4376-1](https://usn.ubuntu.com/4376-1/). - The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4377-1](https://usn.ubuntu.com/4377-1/). - The CVEs are related to vulnerabilities with `ca-certificates`. -- vSphere 7.0 with NSX-T 3.0 experienced a bug when using `create-vm` and `upgrade-opsman`. - If NSX-T deployed a network that was read in the vCenter as multiple port groups with the same name - those tasks would fail, and be unable to import the Ops Manager OVA file. - - The `network` property when creating an Ops Manager VM can take two new types of identifiers for identify a network. - - 1. 
If using port groups, the `network` property must be `switch name/port group name`. - For example, `network: edge-cluster-w01-vc-AZ01-vds01/pas-infrastructure-az1-ls`. - 1. [MO reference](https://kb.vmware.com/s/article/1017126) can also be used. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now supports ops manager syslog in tiles. - In the tile metadata, this property is turned on with the `opsmanager_syslog: true` field. - Tiles with this property enabled will now add the section to `product.yml` - and create defaults in `default-vars.yml`. -- Added shorthand flag consistency to multiple commands. - `--vars-file` shorthand is `-l` and `--var` shorthand is `-v` -- **EXPERIMENTAL** `config-template` can specify the number of collection ops files using `--size-of-collections`. - Some use cases required that collections generate more ops-file for usage. - The default value is still `10`. -- `config-template` has been updated to include placeholders for - `network_name`, `singleton_availability_zone`, and `service_network_name` - in `required-vars.yml` when appropriate. -- `config-template` Bug Fix: Required collections now parametrize correctly in `product.yml`. - In the [om issue](https://github.com/pivotal-cf/om/issues/483) - for `p-dataflow`, the following was _incorrectly_ returned: - ``` - .properties.maven_repositories: - value: - - key: spring - password: ((password)) - url: https://repo.spring.io/libs-release - username: username - ``` - - `config-template` now returns the following correct subsection in `product.yml`: - ``` - .properties.maven_repositories: - value: - - key: spring - password: - secret: ((password)) - url: https://repo.spring.io/libs-release - username: username - ``` - - **if you have used the workaround described in the issue** - (storing the value as a JSON object) - you will need to update the credential in Credhub - to not be a JSON object. 
-- `config-template` generated `resource-vars.yml` - that had the potential to conflict with property names - (spring cloud dataflow had a configurable property called `max_in_flight` - which is also a resource config property). - `config-template` now prepends **all** resource-vars with `resource-var-`. - This prevents this entire class of conflicts. - If using `config-template` to update vars/ops-files/etc, - check your resource var names in any files vars may be drawn from. - This resolves om issue [#484](https://github.com/pivotal-cf/om/issues/484). - -## v4.3.8 -Released May 20, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- _Sometimes_ vsphere `create-vm`/`delete-vm`/`upgrade-opsman` would fail with: - `govc[stderr]: panic: send on closed channel` - due to a bug in [govc](https://github.com/vmware/govmomi/issues/1972). - - These tasks have implemented the workaround described in the issue. -- CVE update to container image. Resolves [USN-4359-1](https://usn.ubuntu.com/4359-1/). - The CVEs are related to vulnerabilities with `apt`. - -## v4.3.6 -Released April 28, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `IGNORE_WARNINGS` parameter for the - `apply-changes`, `stage-configure-apply`, and `apply-director-changes` tasks - allows users to ignore all warnings from ignorable verifiers. - Some verifiers continue to return warnings even when disabled, - preventing deployment without the `IGNORE_WARNINGS: true` param set. - If the verifiers that are preventing deployment - are known issues based on the environment setup, - then it is safe to use the flag. - It is _highly recommended_ to disable verifiers before ignoring warnings. -- CVE update to container image. Resolves [USN-4329-1](https://usn.ubuntu.com/4329-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4334-1](https://usn.ubuntu.com/4334-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4333-1](https://usn.ubuntu.com/4333-1/). - This CVE is related to vulnerabilities with `python`. -- Adding back the removed `ssh` Ubuntu package. - -## v4.3.5 -Released April 24, 2020 - -!!! bug "Known Issue" - This version attempted to remove some unnecessary dependencies from the image. - In this process, important utilities may have been removed as well. - In particular, we know that `ssh` is missing. - If you use this version and find any vital tools missing, please let us know. - A forthcoming patch version will restore `ssh` and any other identified tools. - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `winfs-injector` has been bumped to support the new TAS Windows tile. - When downloading a product from Pivnet, the [`download-product`][download-product] task - uses `winfs-injector` to package the Windows rootfs in the tile. - Newer version of TAS Windows, use a new packaging method, which requires this bump. - - If you see the following error, you need this fix. - - ``` - Checking if product needs winfs injected...+ '[' pas-windows == pas-windows ']' - + '[' pivnet == pivnet ']' - ++ basename downloaded-files/pas-windows-2.7.12-build.2.pivotal - + TILE_FILENAME=pas-windows-2.7.12-build.2.pivotal - + winfs-injector --input-tile downloaded-files/pas-windows-2.7.12-build.2.pivotal --output-tile downloaded-product/pas-windows-2.7.12-build.2.pivotal - open /tmp/015434627/extracted-tile/embed/windowsfs-release/src/code.cloudfoundry.org/windows2016fs/2019/IMAGE_TAG: no such file or directory - ``` - -## v4.3.4 -Released March 25, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.6.2](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.2) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] now correctly allows for an optional vars input. -- `configure-director` now correctly handles when you don't name your iaas_configuration `default` on vSphere. - Previously, naming a configuration anything other than `default` would result in an extra, empty `default` configuration. - This closes issue [#469](https://github.com/pivotal-cf/om/issues/469). -- Downloading a stemcell associated with a product will try to download the light or heavy stemcell. - If anyone has experienced the recent issue with `download-product` - and the AWS heavy stemcell, - this will resolve your issue. - Please remove any custom globbing that might've been added to circumvent this issue. - For example, `stemcell-iaas: light*aws` should just be `stemcell-iaas: aws` now. -- Heavy stemcells could not be downloaded. - Support has now been added. - Define `stemcell-heavy: true` in your [`download-product` config file][download-product-config]. -- CVE update to container image. Resolves [USN-4298-1](https://usn.ubuntu.com/4298-1/). - This CVE is related to vulnerabilities with `libsqlite3`. -- CVE update to container image. Resolves [USN-4305-1](https://usn.ubuntu.com/4305-1/). - This CVE is related to vulnerabilities with `libicu60`. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now includes the option to use a local product file with `--product-path`. 
- -## v4.3.3 -Released February 26, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.5.0](https://github.com/pivotal-cf/om/releases/tag/4.5.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.6.2](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.2) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- GCP [`create-vm`][create-vm] now correctly handles an empty tags list -- All default VM names are now `ops-manager-vm` to conform with IAAS name requirements. - - GCP did not allow for capital letters in VM names. -- CVE update to container image. Resolves [USN-4274-1](https://usn.ubuntu.com/4274-1/). - The CVEs are related to vulnerabilities with `libxml2`. -- Bumped the following low-severity CVE packages: libsystemd0 libudev1 - -## v4.3.2 -February 11, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.4.2](https://github.com/pivotal-cf/om/releases/tag/4.4.2) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.2](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.2) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### What's New -- The [config][download-product-config] for the `download-product` task now recommends using `file-glob` instead of `pivnet-file-glob`. - -### Bug Fixes -- CVE update to container image. Resolves [USN-4243-1](https://usn.ubuntu.com/4243-1/). - The CVEs are related to vulnerabilities with `libbsd`. -- CVE update to container image. Resolves [USN-4249-1](https://usn.ubuntu.com/4249-1/). 
- The CVEs are related to vulnerabilities with `e2fsprogs`. -- CVE update to container image. Resolves [USN-4233-2](https://usn.ubuntu.com/4233-2/). - The CVEs are related to vulnerabilities with `libgnutls30`. -- CVE update to container image. Resolves [USN-4256-1](https://usn.ubuntu.com/4256-1/). - The CVEs are related to vulnerabilities with `libsasl2-2`. -- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v4.3.0 -Released January 31, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.4.1](https://github.com/pivotal-cf/om/releases/tag/4.4.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.2](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.2) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### What's New -- The [`revert-staged-changes`][revert-staged-changes] task has been added. - This allows changes that weren't part of the pipeline to be undone, - by guaranteeing a clean state before using various `configure-*` tasks. -- The `p-automator` CLI includes the ability to extract the Ops Manager VM configuration for Azure and vSphere. - This works for Ops Managers that are already running and useful when [migrating to automation][upgrade-how-to]. -- The `credhub` cli now returns a list of parameters it could not find when `--skip-missing` is enabled. - This feature will show up in the [`credhub-interpolate`][credhub-interpolate], - when `SKIP_MISSING: true` is set. -- The [`prepare-tasks-with-secrets`][prepare-tasks-with-secrets] task has been added. - It replaces the [`credhub-interpolate`][credhub-interpolate] task and provides the following benefits: - - Support for all native Concourse secrets stores including Credhub and Vault. 
- - Credhub credentials are no longer required by the task so they can be completely handled by concourse. - - Credentials are no longer written to disk which alleviates some security concerns. - - For a detailed explanation of this new task, see [Using prepare-tasks-with-secrets][prepare-tasks-with-secrets-how-to]. - To replace `credhub-interpolate` with this new task, see [Replacing credhub-interpolate with prepare-tasks-with-secrets][prepare-tasks-with-secrets-replace]. (Note: This task uses a Concourse feature that allows inputs and outputs to have the same name. This feature is only available in Concourse 5+. prepare-tasks-with-secrets does not work with Concourse 4.) - -- The docker image includes the correct flavor of `nc` (`netcat-openbsd`) to be used with `bosh ssh`. -- Add the ability to recreate VMs to the [`apply-changes`][apply-changes] and [`stage-configure-apply`][stage-configure-apply] tasks. - - If `RECREATE: true`, these commands will recreate all product VMs for their relevant product(s). - - To recreate the BOSH director VM, make any change to the director tile, and apply-changes. - We recommend modifying the Custom SSH Banner if this is desired. - -### Bug Fixes -- The p-automator binary no longer accepts unknown flags for faster debug feedback -- CVE update to container image. Resolves [USN-4236-1](https://usn.ubuntu.com/4236-1/). - The CVEs are related to vulnerabilities with `Libgcrypt`. -- CVE update to container image. Resolves [USN-4233-1](https://usn.ubuntu.com/4233-1/). - The CVEs are related to vulnerabilities with `GnuTLS`. - -## v4.2.20 -September 15, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.3.0](https://github.com/pivotal-cf/om/releases/tag/6.3.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- There was an issue with how `om` retrieved credentials - within the new heuristic logic for collections updates. - In particular, this impacted the upgrade from TAS 2.6 to 2.7, - and caused `configure-product` to fail. - The error message looked something like this: - - ```bash - 2020/09/01 06:35:54 could not execute "configure-product": failed to configure product: failed to associate guids for property ".properties.credhub_internal_provider_keys" because: - request failed: unexpected response from /api/v0/deployed/products/cf-6bdb4038d37667f9f424/credentials/.properties.credhub_internal_provider_keys[0].key: - HTTP/1.1 404 Not Found - ...more HTTP headers... - ``` - -## v4.2.19 -September 9, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.2.0](https://github.com/pivotal-cf/om/releases/tag/6.2.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.8.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.8.0) | - | winfs-injector | [0.19.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.19.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Bump the CLIs for `om`, `credhub`, and `winfs-injector`. - -## v4.2.18 -Released September 4, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.4.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.4.0) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- tl;dr: If you have experienced the following error with the [`create-vm`][create-vm] task this is fixed. - - ```bash - creating the new opsman vm - Using gcp... - Error: unexpected error: could not marshal image file: yaml: unmarshal errors: - line 6: cannot unmarshal !!map into string - ``` - - With GCP OpsManager, the image YAML file format includes a new key. - - The original format of the image YAML was: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.9-build.164.tar.gz - ``` - - The new format includes the `image` key: - - ```yaml - --- - us: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - eu: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - asia: ops-manager-us/pcf-gcp-2.9.10-build.177.tar.gz - image: - name: ops-manager-2-9-10-build-177 - project: pivotal-ops-manager-images - ``` - - This patch ignores this value, where previously it would've not been able to parse it. 
- -- The container image has been fixed to support the `registry-image` Concourse resource - - For example, given a `config.yml`, - - ```yaml - product-name: some - product-properties: - email-password: ((email-password)) - ssh-password: ((ssh-password)) - ``` - - And given a `vars.yml`, - - ```yaml - email-password: ((password)) - ssh-password: ((password)) - ``` - - Each task will now fully evaluate the parameter `((password))` as a value from the Concourse configured secret manager. - This fixes the issue where `((password))` would have been the actual _string_ value for - `email-password` and `ssh-password`. - -## v4.2.17 -Released August 20, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- `configure-product` will no longer assign a new GUID for unnamed collections. - This means that for some tiles, - configure-product will now avoid unnecessary changes to collections. -- `download-product` will work with supported versions of TAS Windows - released after Friday August 20th, 2020. - These versions do not work with older versions of Platform Automation. - The TAS Windows tiles on Tanzu Network now include Open Source License files - in the tile itself. - Platform Automation needed to bump the winfs-injector version - to ensure compatibility with this new arrangement. -- CVE updates to container image. Resolves [USN-4466-1](https://ubuntu.com/security/notices/USN-4466-1) - The CVE is related to vulnerabilities in curl and libcurl. - -## v4.2.16 -Released July 30, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4416-1](https://usn.ubuntu.com/4416-1/). - The CVEs are related to vulnerabilities with `libc6` and related libraries. -- CVE update to container image. Resolves [USN-4428-1](https://usn.ubuntu.com/4428-1/). - The CVEs are related to vulnerabilities with `python2.7`, `python2.7-minimal`, `python3.5`, `python3.5-minimal` and related libraries. - -## v4.2.15 -Released July 10, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [5.0.0](https://github.com/pivotal-cf/om/releases/tag/5.0.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4402-1](https://usn.ubuntu.com/4402-1/). - The CVEs are related to vulnerabilities with `curl` and related libraries. - -## v4.2.14 -Released June 15, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4394-1](https://usn.ubuntu.com/4394-1/). - The CVEs are related to vulnerabilities with `libsqlite`. - -## v4.2.13 -Released June 5, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4376-1](https://usn.ubuntu.com/4376-1/). - The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4377-1](https://usn.ubuntu.com/4377-1/). - The CVEs are related to vulnerabilities with `ca-certificates`. -- vSphere 7.0 with NSX-T 3.0 experienced a bug when using `create-vm` and `upgrade-opsman`. - If NSX-T deployed a network that was read in the vCenter as multiple port groups with the same name - those tasks would fail, and be unable to import the Ops Manager OVA file. - - The `network` property when creating an Ops Manager VM can take two new types of identifiers to identify a network. - - 1. 
If using port groups, the `network` property must be `switch name/port group name`. - For example, `network: edge-cluster-w01-vc-AZ01-vds01/pas-infrastructure-az1-ls`. - 1. [MO reference](https://kb.vmware.com/s/article/1017126) can also be used. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now supports ops manager syslog in tiles. - In the tile metadata, this property is turned on with the `opsmanager_syslog: true` field. - Tiles with this property enabled will now add the section to `product.yml` - and create defaults in `default-vars.yml`. -- Added shorthand flag consistency to multiple commands. - `--vars-file` shorthand is `-l` and `--var` shorthand is `-v` -- **EXPERIMENTAL** `config-template` can specify the number of collection ops files using `--size-of-collections`. - Some use cases required that collections generate more ops-file for usage. - The default value is still `10`. -- `config-template` has been updated to include placeholders for - `network_name`, `singleton_availability_zone`, and `service_network_name` - in `required-vars.yml` when appropriate. -- `config-template` Bug Fix: Required collections now parametrize correctly in `product.yml`. - In the [om issue](https://github.com/pivotal-cf/om/issues/483) - for `p-dataflow`, the following was _incorrectly_ returned: - ``` - .properties.maven_repositories: - value: - - key: spring - password: ((password)) - url: https://repo.spring.io/libs-release - username: username - ``` - - `config-template` now returns the following correct subsection in `product.yml`: - ``` - .properties.maven_repositories: - value: - - key: spring - password: - secret: ((password)) - url: https://repo.spring.io/libs-release - username: username - ``` - - **if you have used the workaround described in the issue** - (storing the value as a JSON object) - you will need to update the credential in Credhub - to not be a JSON object. 
-- `config-template` generated `resource-vars.yml` - that had the potential to conflict with property names - (spring cloud dataflow had a configurable property called `max_in_flight` - which is also a resource config property). - `config-template` now prepends **all** resource-vars with `resource-var-`. - This prevents this entire class of conflicts. - If using `config-template` to update vars/ops-files/etc, - check your resource var names in any files vars may be drawn from. - This resolves om issue [#484](https://github.com/pivotal-cf/om/issues/484). - -## v4.2.11 -Released May 20, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- _Sometimes_ vsphere `create-vm`/`delete-vm`/`upgrade-opsman` would fail with: - `govc[stderr]: panic: send on closed channel` - due to a bug in [govc](https://github.com/vmware/govmomi/issues/1972). - - These tasks have implemented the workaround described in the issue. - -- CVE update to container image. Resolves [USN-4359-1](https://usn.ubuntu.com/4359-1/). - The CVEs are related to vulnerabilities with `apt`. - -## v4.2.9 -Released April 28, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4329-1](https://usn.ubuntu.com/4329-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4334-1](https://usn.ubuntu.com/4334-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4333-1](https://usn.ubuntu.com/4333-1/). - This CVE is related to vulnerabilities with `python`. - -## v4.2.8 -Released April 24, 2020 - -!!! bug "Known Issue" - This version attempted to remove some unnecessary dependencies from the image. - In this process, important utilities may have been removed as well. - In particular, we know that `ssh` is missing. - If you use this version and find any vital tools missing, please let us know. - A forthcoming patch version will restore `ssh` and any other identified tools. - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `winfs-injector` has been bumped to support the new TAS Windows tile. 
- When downloading a product from Pivnet, the [`download-product`][download-product] task - uses `winfs-injector` to package the Windows rootfs in the tile. - Newer version of TAS Windows, use a new packaging method, which requires this bump. - - If you see the following error, you need this fix. - - ``` - Checking if product needs winfs injected...+ '[' pas-windows == pas-windows ']' - + '[' pivnet == pivnet ']' - ++ basename downloaded-files/pas-windows-2.7.12-build.2.pivotal - + TILE_FILENAME=pas-windows-2.7.12-build.2.pivotal - + winfs-injector --input-tile downloaded-files/pas-windows-2.7.12-build.2.pivotal --output-tile downloaded-product/pas-windows-2.7.12-build.2.pivotal - open /tmp/015434627/extracted-tile/embed/windowsfs-release/src/code.cloudfoundry.org/windows2016fs/2019/IMAGE_TAG: no such file or directory - ``` - -## v4.2.7 -Released March 25, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- `configure-director` now correctly handles when you don't name your iaas_configuration `default` on vSphere. - Previously, naming a configuration anything other than `default` would result in an extra, empty `default` configuration. - This closes issue [#469](https://github.com/pivotal-cf/om/issues/469). -- Downloading a stemcell associated with a product will try to download the light or heavy stemcell. - If anyone has experienced the recent issue with `download-product` - and the AWS heavy stemcell, - this will resolve your issue. 
- Please remove any custom globbing that might've been added to circumvent this issue. - For example, `stemcell-iaas: light*aws` should just be `stemcell-iaas: aws` now. -- Heavy stemcells could not be downloaded. - Support has now been added. - Define `stemcell-heavy: true` in your `download-product` config file. -- CVE update to container image. Resolves [USN-4298-1](https://usn.ubuntu.com/4298-1/). - This CVE is related to vulnerabilities with `libsqlite3`. -- CVE update to container image. Resolves [USN-4305-1](https://usn.ubuntu.com/4305-1/). - This CVE is related to vulnerabilities with `libicu60`. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now includes the option to use a local product file with `--product-path`. - -## v4.2.6 -Released February 21, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.3.0](https://github.com/pivotal-cf/om/releases/tag/4.3.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- GCP [`create-vm`][create-vm] now correctly handles an empty tags list -- CVE update to container image. Resolves [USN-4274-1](https://usn.ubuntu.com/4274-1/). - The CVEs are related to vulnerabilities with `libxml2`. -- Bumped the following low-severity CVE packages: libsystemd0 libudev1 - -## v4.2.5 -Released February 10, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.3.0](https://github.com/pivotal-cf/om/releases/tag/4.3.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4243-1](https://usn.ubuntu.com/4243-1/). - The CVEs are related to vulnerabilities with `libbsd`. -- CVE update to container image. Resolves [USN-4249-1](https://usn.ubuntu.com/4249-1/). - The CVEs are related to vulnerabilities with `e2fsprogs`. -- CVE update to container image. Resolves [USN-4233-2](https://usn.ubuntu.com/4233-2/). - The CVEs are related to vulnerabilities with `libgnutls30`. -- CVE update to container image. Resolves [USN-4256-1](https://usn.ubuntu.com/4256-1/). - The CVEs are related to vulnerabilities with `libsasl2-2`. -- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v4.2.4 -Released January 28, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.3.0](https://github.com/pivotal-cf/om/releases/tag/4.3.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4236-1](https://usn.ubuntu.com/4236-1/). - The CVEs are related to vulnerabilities with `Libgcrypt`. -- CVE update to container image. Resolves [USN-4233-1](https://usn.ubuntu.com/4233-1/). 
- The CVEs are related to vulnerabilities with `GnuTLS`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v4.2.3 -Released December 12, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.3.0](https://github.com/pivotal-cf/om/releases/tag/4.3.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - -### Bug Fixes -- When specifying `StorageSKU` for azure, `p-automator` would append `--storage-sku` twice in the creating VM invocation. - It does not affect anything, but we removed the second instance to avoid confusion. -- CVE update to container image. Resolves [USN-4220-1](https://usn.ubuntu.com/4220-1/). - The CVEs are related to vulnerabilities with `git`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v4.2.2 -Released December 3, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.3.0](https://github.com/pivotal-cf/om/releases/tag/4.3.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - -### What's New -- The `p-automator` CLI includes the ability to extract the Ops Manager VM configuration (GCP and AWS Only at the moment). - This works for Ops Managers that are already running and useful when [migrating to automation][upgrade-how-to]. - - Usage: - - 1. Get the Platform Automation Toolkit image from Tanzu Network. - 1. Import that image into `docker` to run the [`p-automation` locally][running-commands-locally]. 
- 1. Create a [state file][state] that represents your current VM and IAAS. - 1. Invoke the `p-automator` CLI to get the configuration. - - For example, on AWS with an access key and secret key: - - ```bash - docker run -it --rm -v $PWD:/workspace -w /workspace platform-automation-image \ - p-automator export-opsman-config \ - --state-file=state.yml \ - --aws-region=us-west-1 \ - --aws-secret-access-key some-secret-key \ - --aws-access-key-id some-access-key - ``` - - The outputted `opsman.yml` contains the information needed for Platform Automation Toolkit to manage the Ops Manager VM. - -- When creating a `create-vm` task for Azure, - the disk type and VM type can be specified. - The configuration `storage_sku` and `vm_size` use the Azure values accordingly. -- The [`download-product`][download-product] task now supports the `SOURCE` param - to specify where to download products and stemcells from. - The supported sources are Azure (`azure`), GCS (`gcs`), S3 (`s3`), and Tanzu Network (`pivnet`). -- [`configure-authentication`][configure-authentication], - [`configure-ldap-authentication`][configure-ldap-authentication], and - [`configure-saml-authentication`][configure-saml-authentication] - now support passing through vars files to the underlying `om` command. -- When using [`configure-product`][configure-product] and [`configure-director`][configure-director], - the `additional_vm_extensions` for a resource will have the following behaviour: - - If not set in config file, the value from Ops Manager will be persisted. - - If defined in the config file as an empty array (`[]`), the values on Ops Manager will be removed. - - If defined in the file with a value (`["web_lb"]`), these values will be set on Ops Manager. -- When using [`configure-director`][configure-director] - `vmextensions-configuration` can be defined to add|remove vm_extensions - to|from the BOSH director.
An example of this in the config: - - ```yaml - vmextensions-configuration: - - name: a_vm_extension - cloud_properties: - source_dest_check: false - - name: another_vm_extension - cloud_properties: - foo: bar - ``` - -### Deprecation Notices -- The `download-product-s3` task has been deprecated - in favor of the [`download-product`][download-product] task and setting the `SOURCE: s3` in `params`. - - For example, the `download-product-s3` in a pipeline: - - ```yaml - - task: download-pas - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product-s3.yml - params: - CONFIG_FILE: download-product/pas.yml - ``` - - Will be changed to: - - ```yaml - - task: download-pas - image: platform-automation-image - file: platform-automation-tasks/tasks/download-product.yml - params: - CONFIG_FILE: download-product/pas.yml - SOURCE: s3 - ``` - -### Bug Fixes -- When creating a Ops Manager on Azure, - there was a bug in offline environments. - We are now using the full image reference ID when creating the VM. -- CVE update to container image. Resolves [USN-4205-1](https://usn.ubuntu.com/4205-1/). - This CVE is related to vulnerabilities with `libsqlite3`. - None of our code calls `libsqlite3` directly, but the IaaS CLIs rely on this package. - -## v4.1.22 -Released August 20, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.18.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.18.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- `configure-product` will no longer assign a new GUID for unnamed collections. 
- This means that for some tiles, - configure-product will now avoid unnecessary changes to collections. -- `download-product` will work with supported versions of TAS Windows - released after Friday August 20th, 2020. - These versions do not work with older versions of Platform Automation. - The TAS Windows tiles on Tanzu Network now include Open Source License files - in the tile itself. - Platform Automation needed to bump the winfs-injector version - to ensure compatibility with this new arrangement. -- CVE updates to container image. Resolves [USN-4466-1](https://ubuntu.com/security/notices/USN-4466-1) - The CVE is related to vulnerabilities in curl and libcurl. - -## v4.1.21 -Released July 30, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [6.1.0](https://github.com/pivotal-cf/om/releases/tag/6.1.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4416-1](https://usn.ubuntu.com/4416-1/). - The CVEs are related to vulnerabilities with `libc6` and related libraries. -- CVE update to container image. Resolves [USN-4428-1](https://usn.ubuntu.com/4428-1/). - The CVEs are related to vulnerabilities with `python2.7`, `python2.7-minimal`, `python3.5`, `python3.5-minimal` and related libraries. - -## v4.1.20 -Released July 10, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [5.0.0](https://github.com/pivotal-cf/om/releases/tag/5.0.0) | - | bosh-cli | [v6.3.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.3.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4394-1](https://usn.ubuntu.com/4394-1/). - The CVEs are related to vulnerabilities with `libsqlite`. - -## v4.1.19 -Released June 15, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4394-1](https://usn.ubuntu.com/4394-1/). - The CVEs are related to vulnerabilities with `libsqlite`. - -## v4.1.18 -Released June 5, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.8.0](https://github.com/pivotal-cf/om/releases/tag/4.8.0) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.7.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.7.0) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. 
Resolves [USN-4376-1](https://usn.ubuntu.com/4376-1/). - The CVEs are related to vulnerabilities with `libssl`. -- CVE update to container image. Resolves [USN-4377-1](https://usn.ubuntu.com/4377-1/). - The CVEs are related to vulnerabilities with `ca-certificates`. -- vSphere 7.0 with NSX-T 3.0 experienced a bug when using `create-vm` and `upgrade-opsman`. - If NSX-T deployed a network that was read in the vCenter as multiple port groups with the same name - those tasks would fail, and be unable to import the Ops Manager OVA file. - - The `network` property when creating an Ops Manager VM can take two new types of identifiers for identify a network. - - 1. If using port groups, the `network` property must be `switch name/port group name`. - For example, `network: edge-cluster-w01-vc-AZ01-vds01/pas-infrastructure-az1-ls`. - 1. [MO reference](https://kb.vmware.com/s/article/1017126) can also be used. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now supports ops manager syslog in tiles. - In the tile metadata, this property is turned on with the `opsmanager_syslog: true` field. - Tiles with this property enabled will now add the section to `product.yml` - and create defaults in `default-vars.yml`. -- Added shorthand flag consistency to multiple commands. - `--vars-file` shorthand is `-l` and `--var` shorthand is `-v` -- **EXPERIMENTAL** `config-template` can specify the number of collection ops files using `--size-of-collections`. - Some use cases required that collections generate more ops-file for usage. - The default value is still `10`. -- `config-template` has been updated to include placeholders for - `network_name`, `singleton_availability_zone`, and `service_network_name` - in `required-vars.yml` when appropriate. -- `config-template` Bug Fix: Required collections now parametrize correctly in `product.yml`. 
- In the [om issue](https://github.com/pivotal-cf/om/issues/483) - for `p-dataflow`, the following was _incorrectly_ returned: - ``` - .properties.maven_repositories: - value: - - key: spring - password: ((password)) - url: https://repo.spring.io/libs-release - username: username - ``` - - `config-template` now returns the following correct subsection in `product.yml`: - ``` - .properties.maven_repositories: - value: - - key: spring - password: - secret: ((password)) - url: https://repo.spring.io/libs-release - username: username - ``` - - **if you have used the workaround described in the issue** - (storing the value as a JSON object) - you will need to update the credential in Credhub - to not be a JSON object. -- `config-template` generated `resource-vars.yml` - that had the potential to conflict with property names - (spring cloud dataflow had a configurable property called `max_in_flight` - which is also a resource config property). - `config-template` now prepends **all** resource-vars with `resource-var-`. - This prevents this entire class of conflicts. - If using `config-template` to update vars/ops-files/etc, - check your resource var names in any files vars may be drawn from. - This resolves om issue [#484](https://github.com/pivotal-cf/om/issues/484). - -## v4.1.16 -Released May 14, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- _Sometimes_ vsphere `create-vm`/`delete-vm`/`upgrade-opsman` would fail with: - `govc[stderr]: panic: send on closed channel` - due to a bug in [govc](https://github.com/vmware/govmomi/issues/1972). - - These tasks have implemented the workaround described in the issue. - -- CVE update to container image. Resolves [USN-4359-1](https://usn.ubuntu.com/4359-1/). - The CVEs are related to vulnerabilities with `apt`. - -## v4.1.14 -Released April 28, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4329-1](https://usn.ubuntu.com/4329-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4334-1](https://usn.ubuntu.com/4334-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4333-1](https://usn.ubuntu.com/4333-1/). - This CVE is related to vulnerabilities with `python`. -- Adding back the removed `ssh` Ubuntu package. 
- -## v4.1.13 -Released April 20, 2020 - -!!! bug "Known Issue" - This version attempted to remove some unnecessary dependencies from the image. - In this process, important utilities may have been removed as well. - In particular, we know that `ssh` is missing. - If you use this version and find any vital tools missing, please let us know. - A forthcoming patch version will restore `ssh` and any other identified tools. - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `winfs-injector` has been bumped to support the new TAS Windows tile. - When downloading a product from Pivnet, the [`download-product`][download-product] task - uses `winfs-injector` to package the Windows rootfs in the tile. - Newer version of TAS Windows, use a new packaging method, which requires this bump. - - If you see the following error, you need this fix. - - ``` - Checking if product needs winfs injected...+ '[' pas-windows == pas-windows ']' - + '[' pivnet == pivnet ']' - ++ basename downloaded-files/pas-windows-2.7.12-build.2.pivotal - + TILE_FILENAME=pas-windows-2.7.12-build.2.pivotal - + winfs-injector --input-tile downloaded-files/pas-windows-2.7.12-build.2.pivotal --output-tile downloaded-product/pas-windows-2.7.12-build.2.pivotal - open /tmp/015434627/extracted-tile/embed/windowsfs-release/src/code.cloudfoundry.org/windows2016fs/2019/IMAGE_TAG: no such file or directory - ``` - -## v4.1.12 -Released March 25, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.6.0](https://github.com/pivotal-cf/om/releases/tag/4.6.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- `configure-director` now correctly handles when you don't name your iaas_configuration `default` on vSphere. - Previously, naming a configuration anything other than `default` would result in an extra, empty `default` configuration. - This closes issue [#469](https://github.com/pivotal-cf/om/issues/469). -- Downloading a stemcell associated with a product will try to download the light or heavy stemcell. - If anyone has experienced the recent issue with `download-product` - and the AWS heavy stemcell, - this will resolve your issue. - Please remove any custom globbing that might've been added to circumvent this issue. - For example, `stemcell-iaas: light*aws` should just be `stemcell-iaas: aws` now. -- Heavy stemcells could not be downloaded. - Support has now been added. - Define `stemcell-heavy: true` in your `download-product` config file. -- CVE update to container image. Resolves [USN-4298-1](https://usn.ubuntu.com/4298-1/). - This CVE is related to vulnerabilities with `libsqlite3`. -- CVE update to container image. Resolves [USN-4305-1](https://usn.ubuntu.com/4305-1/). - This CVE is related to vulnerabilities with `libicu60`. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now includes the option to use a local product file with `--product-path`. - - -## v4.1.11 -Released February 25, 2020 - -???
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.2.1](https://github.com/pivotal-cf/om/releases/tag/4.2.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- GCP [`create-vm`][create-vm] now correctly handles an empty tags list -- CVE update to container image. Resolves [USN-4274-1](https://usn.ubuntu.com/4274-1/). - The CVEs are related to vulnerabilities with `libxml2`. -- Bumped the following low-severity CVE packages: libsystemd0 libudev1 - -## v4.1.10 -Released February 7, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.2.1](https://github.com/pivotal-cf/om/releases/tag/4.2.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4243-1](https://usn.ubuntu.com/4243-1/). - The CVEs are related to vulnerabilities with `libbsd`. -- CVE update to container image. Resolves [USN-4249-1](https://usn.ubuntu.com/4249-1/). - The CVEs are related to vulnerabilities with `e2fsprogs`. -- CVE update to container image. Resolves [USN-4233-2](https://usn.ubuntu.com/4233-2/). - The CVEs are related to vulnerabilities with `libgnutls30`. -- CVE update to container image. Resolves [USN-4256-1](https://usn.ubuntu.com/4256-1/). - The CVEs are related to vulnerabilities with `libsasl2-2`. 
-- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v4.1.9 -Released January 22, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.2.1](https://github.com/pivotal-cf/om/releases/tag/4.2.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4236-1](https://usn.ubuntu.com/4236-1/). - The CVEs are related to vulnerabilities with `Libgcrypt`. -- CVE update to container image. Resolves [USN-4233-1](https://usn.ubuntu.com/4233-1/). - The CVEs are related to vulnerabilities with `GnuTLS`. - -## v4.1.8 -Released December 12, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.2.1](https://github.com/pivotal-cf/om/releases/tag/4.2.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4220-1](https://usn.ubuntu.com/4220-1/). - The CVEs are related to vulnerabilities with `git`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v4.1.7 -Released December 3, 2019 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.2.1](https://github.com/pivotal-cf/om/releases/tag/4.2.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4205-1](https://usn.ubuntu.com/4205-1/). - This CVE is related to vulnerabilities with `libsqlite3`. - None of our code calls `libsqlite3` directly, but the IaaS CLIs rely on this package. -- When using the `check-pending-changes` task, - it would not work because it referenced a script that did not exist. - The typo has been fixed and tested in the reference pipeline. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v4.1.5 -Released November 19, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.2.1](https://github.com/pivotal-cf/om/releases/tag/4.2.1) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4172-1](https://usn.ubuntu.com/4172-1/). - This CVE is related to vulnerabilities with `file` and `libmagic`. -- CVE update to container image. Resolves [USN-4168-1](https://usn.ubuntu.com/4168-1/). - This CVE is related to vulnerabilities with `libidn2`. -- Bump `bosh` CLI to v6.1.1 -- Bump `credhub` CLI to v2.6.1 - -### Experimental Features -- **EXPERIMENTAL** `config-template` now supports the `--config`, `--var`, `--vars-file`, and `--vars-env` flags.
-- **EXPERIMENTAL** `config-template` now includes `max-in-flight` for all resources. - -## v4.1.2 -Released October 21, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [4.1.0](https://github.com/pivotal-cf/om/releases/tag/4.1.0) | - | bosh-cli | [6.1.0](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.0) | - | credhub | [2.6.0](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.0) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### What's New -- [Ops Manager config for vSphere][inputs-outputs-vsphere] now validates the required properties -- The new task [expiring-certificates] - fails if there are any expiring certificates - in a user specified time range. - Root CAs cannot be included in this list until Ops Manager 2.7. - - Example Output: - - ```text - Getting expiring certificates... - [X] Ops Manager - cf-79fba6887e8c29375eb7: - .uaa.service_provider_key_credentials: expired on 09 Aug 19 17:05 UTC - could not execute "expiring-certificates": found expiring certs in the foundation - exit status 1 - ``` - -- [Telemetry][telemetry-docs] support has been added! - To opt in, you must get the Telemetry tool from [Tanzu Network][telemetry], - create a [config file][telemetry-config], - and add the [collect-telemetry][collect-telemetry] and [send-telemetry][send-telemetry] tasks to your pipeline. - For an example, please see the [Reference Pipelines][reference-pipeline]. -- [stage-configure-apply][stage-configure-apply] task has been added. - This task will take a product, stage it, configure it, and apply changes - _only_ for that product (all other products remain unchanged). - Use this task only if you have confidence in the ordering - in which you apply-changes for your products. -- [check-pending-changes][check-pending-changes] task has been added. 
- This task will perform a check on Ops Manager and fail if there are pending changes. - This is useful when trying to prevent manual changes - from being applied during the automation process. -- The VM state files currently support YAML, - but when generated, JSON was outputted. - This caused confusion. - The generated state file is now outputted as YAML. - -### Deprecation Notices -- The `host` field in the vcenter section of the [vsphere opsman.yml][inputs-outputs-vsphere] has been deprecated. - Platform Automation Toolkit can initially choose where the VM is placed - but cannot guarantee that it stays there - or that other generated VMs are assigned to the same host. -- The `vpc_subnet` field in [azure_opsman.yml][inputs-outputs-azure] has been deprecated. - In your opsman.yml, replace `vpc_subnet` with `subnet_id`. - This change was to help mitigate confusion - as VPC is an AWS, not an Azure, concept. -- The optional `use_unmanaged_disk` field in [azure_opsman.yml][inputs-outputs-azure] has been deprecated. - In your opsman.yml, replace `use_unmanaged_disk: true` with `use_managed_disk: false`. - The default for `use_managed_disk` is true. - Unmanaged disk is not recommended by Azure. - If you would like to use unmanaged disks, - please opt-out by setting `use_managed_disk: false`. -- The optional `use_instance_profile` field in [aws_opsman.yml][inputs-outputs-aws] has been deprecated. - It was redundant. - When you don't specify `access_key_id` and `secret_access_key`, - the authentication will try to use the instance profile on the executing machine -- for example, a concourse worker. - This works in conjunction with how the `aws` CLI finds authentication. -- The required `security_group_id` field in [aws_opsman.yml][inputs-outputs-aws] has been deprecated. - Replace `security_group_id` with `security_group_ids` as a YAML array. - For example, `security_group_id: sg-1` - becomes `security_group_ids: [ sg-1 ]`.
- This allows the specification of multiple security groups to the Ops Manager VM. - -### Bug Fixes -- CVE update to container image. Resolves [USN-4151-1](https://usn.ubuntu.com/4151-1/). - This CVE is related to vulnerabilities with `python`. - None of our code calls `python` directly, but the IaaS CLIs rely on this package. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now accepts `--pivnet-file-glob` instead of `--product-file-glob`. - This is to create consistency with the `download-product` command's naming conventions. - (PR: @poligraph) - -## v4.0.16 -Released May 14, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.2.3](https://github.com/pivotal-cf/om/releases/tag/3.2.3) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- _Sometimes_ vsphere `create-vm`/`delete-vm`/`upgrade-opsman` would fail with: - `govc[stderr]: panic: send on closed channel` - due to a bug in [govc](https://github.com/vmware/govmomi/issues/1972). - - These tasks have implemented the workaround described in the issue. - -- CVE update to container image. Resolves [USN-4359-1](https://usn.ubuntu.com/4359-1/). - The CVEs are related to vulnerabilities with `apt`. - -## v4.0.14 -Released April 28, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.2.3](https://github.com/pivotal-cf/om/releases/tag/3.2.3) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4329-1](https://usn.ubuntu.com/4329-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4334-1](https://usn.ubuntu.com/4334-1/). - This CVE is related to vulnerabilities with `git`. -- CVE update to container image. Resolves [USN-4333-1](https://usn.ubuntu.com/4333-1/). - This CVE is related to vulnerabilities with `python`. -- Adding back the removed `ssh` Ubuntu package. - -## v4.0.13 -Released April 20, 2020 - -!!! bug "Known Issue" - This version attempted to remove some unnecessary dependencies from the image. - In this process, important utilities may have been removed as well. - In particular, we know that `ssh` is missing. - If you use this version and find any vital tools missing, please let us know. - A forthcoming patch version will restore `ssh` and any other identified tools. - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.2.3](https://github.com/pivotal-cf/om/releases/tag/3.2.3) | - | bosh-cli | [6.2.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.2.1) | - | credhub | [2.6.2](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.2) | - | winfs-injector | [0.16.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.16.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- The `winfs-injector` has been bumped to support the new TAS Windows tile. - When downloading a product from Pivnet, the [`download-product`][download-product] task - uses `winfs-injector` to package the Windows rootfs in the tile. - Newer versions of TAS Windows use a new packaging method, which requires this bump. - - If you see the following error, you need this fix. - - ``` - Checking if product needs winfs injected...+ '[' pas-windows == pas-windows ']' - + '[' pivnet == pivnet ']' - ++ basename downloaded-files/pas-windows-2.7.12-build.2.pivotal - + TILE_FILENAME=pas-windows-2.7.12-build.2.pivotal - + winfs-injector --input-tile downloaded-files/pas-windows-2.7.12-build.2.pivotal --output-tile downloaded-product/pas-windows-2.7.12-build.2.pivotal - open /tmp/015434627/extracted-tile/embed/windowsfs-release/src/code.cloudfoundry.org/windows2016fs/2019/IMAGE_TAG: no such file or directory - ``` - -## v4.0.12 -Released March 25, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.2.3](https://github.com/pivotal-cf/om/releases/tag/3.2.3) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.14.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.14.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- Downloading a stemcell associated with a product will try to download the light or heavy stemcell. - If anyone has experienced the recent issue with `download-product` - and the AWS heavy stemcell, - this will resolve your issue. - Please remove any custom globbing that might've been added to circumvent this issue. - For example, `stemcell-iaas: light*aws` should just be `stemcell-iaas: aws` now. -- CVE update to container image. Resolves [USN-4298-1](https://usn.ubuntu.com/4298-1/). - This CVE is related to vulnerabilities with `libsqlite3`. -- CVE update to container image. Resolves [USN-4305-1](https://usn.ubuntu.com/4305-1/). - This CVE is related to vulnerabilities with `libicu60`. - -### Experimental Features -- **EXPERIMENTAL** `config-template` now supports the `--exclude-version` flag. - If provided, the command will exclude the version directory in the `--output-directory` tree. - The contents will remain the same with or without the flag. - Please note including the `--exclude-version` flag - will make it more difficult to track changes between versions - unless using a version control system (such as git). -- **EXPERIMENTAL** `config-template` supports `--pivnet-disable-ssl` to skip SSL validation. -- When using `config-template` (**EXPERIMENTAL**) or `download-product`, - the `--pivnet-skip-ssl` is honored when capturing the token. - -## v4.0.11 -Released February 21, 2020 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- GCP [`create-vm`][create-vm] now correctly handles an empty tags list -- CVE update to container image. Resolves [USN-4274-1](https://usn.ubuntu.com/4274-1/). - The CVEs are related to vulnerabilities with `libxml2`. -- Bumped the following low-severity CVE packages: libsystemd0 libudev1 - -## v4.0.10 -Released February 4, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4243-1](https://usn.ubuntu.com/4243-1/). - The CVEs are related to vulnerabilities with `libbsd`. -- CVE update to container image. Resolves [USN-4249-1](https://usn.ubuntu.com/4249-1/). - The CVEs are related to vulnerabilities with `e2fsprogs`. -- CVE update to container image. Resolves [USN-4233-2](https://usn.ubuntu.com/4233-2/). - The CVEs are related to vulnerabilities with `libgnutls30`. -- CVE update to container image. Resolves [USN-4256-1](https://usn.ubuntu.com/4256-1/). - The CVEs are related to vulnerabilities with `libsasl2-2`. 
-- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v4.0.9 -Released January 22, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4236-1](https://usn.ubuntu.com/4236-1/). - The CVEs are related to vulnerabilities with `Libgcrypt`. -- CVE update to container image. Resolves [USN-4233-1](https://usn.ubuntu.com/4233-1/). - The CVEs are related to vulnerabilities with `GnuTLS`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v4.0.8 -Released December 12, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4220-1](https://usn.ubuntu.com/4220-1/). - The CVEs are related to vulnerabilities with `git`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v4.0.7 -Released December 3, 2019 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4205-1](https://usn.ubuntu.com/4205-1/). - This CVE is related to vulnerabilities with `libsqlite3`. - None of our code calls `libsqlite3` directly, but the IaaS CLIs rely on this package. - -## v4.0.6 -Released November 6, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4172-1](https://usn.ubuntu.com/4172-1/). - This CVE is related to vulnerabilities with `file` and `libmagic`. -- CVE update to container image. Resolves [USN-4168-1](https://usn.ubuntu.com/4168-1/). - This CVE is related to vulnerabilities with `libidn2`. -- Bump `bosh` CLI to v6.1.1 -- Bump `credhub` CLI to v2.6.1 - -## v4.0.5 -Released October 25, 2019 - -??? 
info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) | - | bosh-cli | [5.5.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v5.5.1) | - | credhub | [2.5.2](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.5.2) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4151-1](https://usn.ubuntu.com/4151-1/). - This CVE is related to vulnerabilities with `python`. - None of our code calls `python` directly, but the IaaS CLIs rely on this package. - -## v4.0.4 - -Released October 15, 2019, includes `om` version [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4142-1](https://usn.ubuntu.com/4142-1/). - (related to vulnerabilities with `e2fsprogs`. While none of our code directly used these, - they are present on the image.) -- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v4.0.3 - -Released September 27, 2019, includes `om` version [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4127-1](https://usn.ubuntu.com/4127-1/). - This CVE is related to vulnerabilities with `python`. - None of our code calls `python` directly, but the IaaS CLIs rely on this package. -- CVE update to container image. Resolves [USN-4129-1](https://usn.ubuntu.com/4129-1/). - (related to vulnerabilities with `curl` and `libcurl`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4132-1](https://usn.ubuntu.com/4132-1/). - (related to vulnerabilities with `expat`. While none of our code directly used these, - they are present on the image.) 
-- Bumped the following low-severity CVE packages: `libsystemd0`, `libudev1`, `linux-libc-dev` - -## v4.0.1 - -Released September 4, 2019, includes `om` version [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4108-1](https://usn.ubuntu.com/4108-1/). - (related to vulnerabilities with `libzstd`. While none of our code directly used these, - they are present on the image.) -- Bumped the following low-severity CVE packages: `linux-libc-dev` - -## v4.0.0 - -Released August 28, 2019, includes `om` version [3.1.0](https://github.com/pivotal-cf/om/releases/tag/3.1.0) - -### Breaking Changes - -- The tasks have been updated to extract their `bash` scripting into a separate script. - The tasks' script can be used with different CI/CD systems like Jenkins. - - This will be a breaking change if your tasks resource is not named `platform-automation-tasks`. - - For example, - - ```yaml - - get: tasks - - task: configure-authentication - file: tasks/tasks/configure-authentication.yml - ``` - - will be changed to - - ```yaml - - get: platform-automation-tasks - - task: configure-authentication - file: platform-automation-tasks/tasks/configure-authentication.yml - ``` - - Notice that the resource name changed as did the relative path to the task YAML file in `file`. - -### What's New -- [`configure-ldap-authentication`][configure-ldap-authentication], [`configure-saml-authentication`][configure-saml-authentication], and [`configure-authentication`][configure-authentication] - can create a UAA client on the Ops Manager VM. - The client_secret will be the value provided to this option `precreated-client-secret`. - This is supported in OpsManager 2.5+. -- For Ops Manager 2.6+, new task [`pre-deploy-check`][pre-deploy-check] - will validate that Ops Manager and it's staged products - are configured correctly. 
- This may be run at any time - and may be used as a pre-check for `apply-changes`. -- For GCP, [`create-vm`][create-vm] will now allow you - to specify a `gcp_service_account_name` - for the new Ops Manager VM. - This enables you to designate a service account name - as opposed to providing a service account json object. - This may be specified in the [Ops Manager config for GCP][inputs-outputs-gcp]. - For more information on GCP service accounts, refer to the [GCP service accounts][gcp-service-accounts] docs. -- For GCP, [`create-vm`][create-vm] supports setting `scopes` for the new Ops Manager VM. - This may be specified in the [Ops Manager config for GCP][inputs-outputs-gcp]. - For more information on setting GCP scopes, refer to the [GCP scope][gcp-scope] docs. -- [`configure-director`][configure-director] now supports [VM Extensions][vm-extensions]. - *Please note this is an advanced feature, and should be used at your own discretion.* -- [`configure-director`][configure-director] now supports [VM Types][vm-types]. - *Please note this is an advanced feature, and should be used at your own discretion.* -- Add support for new NSX and NSXT format in Ops Manager 2.7+ - when calling [`staged-config`][staged-config] and [`staged-director-config`][staged-director-config] -- [state][state] can now be defined in a `state-$timestamp.yml` format (like [`export-installation`][export-installation]). - This is an _opt-in_ feature, and is only recommended - if you are storing state in a non-versioned s3-compatible blobstore. - To opt-in to this feature, - a param must be added to your pipeline - and given the value of `STATE_FILE: state-$timestamp.yml` - for each invocation of the following commands: - - [`create-vm`][create-vm] - - [`delete-vm`][delete-vm] - - [`upgrade-opsman`][upgrade-opsman] -- [gcp opsman.yml][inputs-outputs-gcp] now supports `ssh_public_key`. - This is used to ssh into the Ops Manager VM to manage non-tile bosh add-ons. 
-- **EXPERIMENTAL** `config-template` now will provide required-vars in addition to default-vars. -- **EXPERIMENTAL** `config-template` will define vars with an `_` instead of a `/`. - This is an aesthetically motivated change. - Ops files are denoted with `/`, - so changing the vars separators to `_` makes this easier to differentiate. -- **EXPERIMENTAL** `config-template` output `product-default-vars.yml` has been changed to `default-vars.yml` - -### Bug Fixes -- [`download-product`][download-product] will now return a `download-product.json` - if `stemcell-iaas` is defined, but there is no stemcell to download for that product. -- [vsphere opsman.yml][inputs-outputs-vsphere] now requires `ssh_public_key` for Ops Manager 2.6+ - This was added to mitigate an error during upgrade - that would cause the VM to enter a reboot loop. -- When using AWS to create the Ops Manager VM with encrypted disks, - the task [`create-vm`][create-vm] and [`upgrade-opsman`][upgrade-opsman] will wait for disk encryption to be completed. - An exponential backoff will be used, and the task will time out after an hour if the disk is not ready. - -## v3.0.18 -Released February 20, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- GCP [`create-vm`][create-vm] now correctly handles an empty tags list -- CVE update to container image. Resolves [USN-4274-1](https://usn.ubuntu.com/4274-1/). - The CVEs are related to vulnerabilities with `libxml2`. 
-- Bumped the following low-severity CVE packages: libsystemd0 libudev1 - -## v3.0.17 -Released February 3, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - - The full Docker image-receipt: Download - -### Bug Fixes -- CVE update to container image. Resolves [USN-4243-1](https://usn.ubuntu.com/4243-1/). - The CVEs are related to vulnerabilities with `libbsd`. -- CVE update to container image. Resolves [USN-4249-1](https://usn.ubuntu.com/4249-1/). - The CVEs are related to vulnerabilities with `e2fsprogs`. -- CVE update to container image. Resolves [USN-4233-2](https://usn.ubuntu.com/4233-2/). - The CVEs are related to vulnerabilities with `libgnutls30`. -- CVE update to container image. Resolves [USN-4256-1](https://usn.ubuntu.com/4256-1/). - The CVEs are related to vulnerabilities with `libsasl2-2`. -- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v3.0.16 -Released January 28, 2020 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4236-1](https://usn.ubuntu.com/4236-1/). 
- The CVEs are related to vulnerabilities with `Libgcrypt`. -- CVE update to container image. Resolves [USN-4233-1](https://usn.ubuntu.com/4233-1/). - The CVEs are related to vulnerabilities with `GnuTLS`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v3.0.15 -Released December 12, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4220-1](https://usn.ubuntu.com/4220-1/). - The CVEs are related to vulnerabilities with `git`. -- Bumped the following low-severity CVE package: `linux-libc-dev` - -## v3.0.14 -Released December 3, 2019 - -??? info "CLI Versions" - - | Name | version | - |---|---| - | om | [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) | - | bosh-cli | [6.1.1](https://github.com/cloudfoundry/bosh-cli/releases/tag/v6.1.1) | - | credhub | [2.6.1](https://github.com/cloudfoundry-incubator/credhub-cli/releases/tag/2.6.1) | - | winfs-injector | [0.13.0](https://github.com/pivotal-cf/winfs-injector/releases/tag/0.13.0) | - -### Bug Fixes -- CVE update to container image. Resolves [USN-4205-1](https://usn.ubuntu.com/4205-1/). - This CVE is related to vulnerabilities with `libsqlite3`. - None of our code calls `libsqlite3` directly, but the IaaS CLIs rely on this package. - -## v3.0.13 -Released November 14, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- CVE update to container image. 
Resolves [USN-4172-1](https://usn.ubuntu.com/4172-1/). - This CVE is related to vulnerabilities with `file` and `libmagic`. -- CVE update to container image. Resolves [USN-4168-1](https://usn.ubuntu.com/4168-1/). - This CVE is related to vulnerabilities with `libidn2`. -- Bump `bosh` CLI to v6.1.1 -- Bump `credhub` CLI to v2.6.1 - -## v3.0.12 -Released October 25, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4151-1](https://usn.ubuntu.com/4151-1/). - This CVE is related to vulnerabilities with `python`. - None of our code calls `python` directly, but the IaaS CLIs rely on this package. - -## v3.0.11 - -Released October 15, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4142-1](https://usn.ubuntu.com/4142-1/). - (related to vulnerabilities with `e2fsprogs`. While none of our code directly used these, - they are present on the image.) -- Bumped the following low-severity CVE packages: `libcom-err2`, `libext2fs2`, `libss2`, `linux-libc-dev` - -## v3.0.10 -Released September 26, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4127-1](https://usn.ubuntu.com/4127-1/). - This CVE is related to vulnerabilities with `python`. - None of our code calls `python` directly, but the IaaS CLIs rely on this package. -- CVE update to container image. Resolves [USN-4129-1](https://usn.ubuntu.com/4129-1/). - (related to vulnerabilities with `curl` and `libcurl`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4132-1](https://usn.ubuntu.com/4132-1/). - (related to vulnerabilities with `expat`. 
While none of our code directly used these, - they are present on the image.) -- Bumped the following low-severity CVE packages: `libsystemd0`, `libudev1`, `linux-libc-dev` - -## v3.0.8 -Released September 4, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4108-1](https://usn.ubuntu.com/4108-1/). - (related to vulnerabilities with `libzstd`. While none of our code directly used these, - they are present on the image.) -- Bumped the following low-severity CVE packages: - `libpython2.7`, `libpython2.7-dev`, `libpython2.7-minimal`, `libpython2.7-stdlib`, `libssl1.1` - `openssl`, `python-cryptography`, `python2.7`, `python2.7-dev`, `python2.7-minimal` - -## v3.0.7 -Released August 28, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- When using AWS to create the Ops Manager VM with encrypted disks, - the task [`create-vm`][create-vm] and [`upgrade-opsman`][upgrade-opsman] will wait for disk encryption to be completed. - An exponential backoff will be used, and the task will time out after an hour if the disk is not ready. -- CVE update to container image. Resolves [USN-4071-1](https://usn.ubuntu.com/4071-1/). - (related to vulnerabilities with `patch`. While none of our code directly used these, - they are present on the image.) -- Bumped the following low-severity CVE packages: - `linux-libc-dev`, `libldap-2.4-2`, `libldap-common`, `linux-libc-dev` - -## v3.0.5 -Released July 22, 2019, includes `om` version [3.0.0](https://github.com/pivotal-cf/om/releases/tag/3.0.0) - -### Bug Fixes -- In [`credhub-interpolate`][credhub-interpolate], [`upload-product`][upload-product], and [`upload-stemcell`][upload-stemcell] - setting `SKIP_MISSING: false` caused the command to fail. - This has been fixed. 
-- [`upgrade-opsman`][upgrade-opsman] would fail on the [`import-installation`][import-installation] step - if the env file did not contain a target or decryption passphrase. - This will now fail before the upgrade process begins - to ensure faster feedback. -- [`upgrade-opsman`][upgrade-opsman] now respects environment variables - when it makes calls internally to `om` - (env file still required). -- `download-product-s3` does not require `pivnet-api-token` anymore. -- `om` CLI has been bumped to v3.0.0. - This includes the following bug fixes: - * `apply-changes --product ` will error with _product not found_ if that product has not been staged. - * `upload-stemcell` now accepts `--floating false` in addition to `floating=false`. - This was done to offer consistency between all of the flags on the command. - * `skip-unchanged-products` was removed from `apply-changes`. - This option has had issues with consistent successful behaviour. - For example, if the apply changes fails for any reason, the subsequent apply changes cannot pick where it left off. - This usually happens in the case of errands that are used for services. - - We are working on scoping a selective deploy feature that makes sense for users. - We would love to have feedback from users about this. - - * remove `revert-staged-changes` - `unstage-product` functionally does the same thing, - but uses the API. -- Bumped the following low-severity CVE packages: `unzip` - -## v3.0.4 -Released July 11, 2019, includes `om` version [2.0.0](https://github.com/pivotal-cf/om/releases/tag/2.0.0) - -### Bug Fixes -- Both [`configure-ldap-authentication`][configure-ldap-authentication] - and [`configure-saml-authentication`][configure-saml-authentication] - will now automatically - create a BOSH UAA admin client as documented [here](https://docs.pivotal.io/pivotalcf/2-5/customizing/opsmanager-create-bosh-client.html#saml). - This is only supported in OpsManager 2.4 and greater. 
- You may specify the option `skip-create-bosh-admin-client` in your config YAML - to skip creating this client. - After the client has been created, - you can find the client ID and secret - by following [steps three and four found here](https://docs.pivotal.io/pivotalcf/2-5/customizing/opsmanager-create-bosh-client.html#-provision-admin-client). - - _This feature needs to be enabled - to properly automate authentication for the bosh director when using LDAP and SAML._ - If `skip-create-bosh-admin-client: true` is specified, manual steps are required, - and this task is no longer "automation". - -- [`create-vm`][create-vm] and [`upgrade-opsman`][upgrade-opsman] now function with `gcp_service_account_name` on GCP. - Previously, only providing a full `gcp_service_account` as a JSON blob worked. -- Environment variables passed to [`create-vm`][create-vm], [`delete-vm`][delete-vm], and [`upgrade-opsman`][upgrade-opsman] - will be passed to the underlying IAAS CLI invocation. - This allows our tasks to work with the `https_proxy` and `no_proxy` variables - that can be [set in Concourse](https://github.com/concourse/concourse-bosh-release/blob/9764b66a6d85785735f6ea8ddcabf77785b5eddd/jobs/worker/spec#L50-L65). -- [`download-product`][download-product] task output of `assign-stemcell.yml` will have the correct `product-name` -- When using the `env.yml` for a task, - extra values passed in the env file will now fail if they are not recognized properties. - Invalid properties might now produce the following: - ```bash - $ om --env env.yml upload-product --product product.pivotal - could not parse env file: yaml: unmarshal errors: - line 5: field invalid-field not found in type main.options - ``` - -- `credhub` CLI has been bumped to v2.5.1. - This includes a fix of not raising an error when processing an empty YAML file. -- `om` CLI has been bumped to v2.0.0. 
- This includes the following bug fixes: - * `download-product` will now return a `download-file.json` - if `stemcell-iaas` is defined but the product has no stemcell. - Previously, this would exit gracefully, but not return a file. - * Non-string environment variables can now be read and passed as strings to Ops Manager. - For example, if your environment variable (`OM_NAME`) is set to `"123"` (with quotes escaped), - it will be evaluated in your config file with the quotes. - - Given `config.yml` - ```yaml - value: ((NAME)) - ``` - - `om interpolate -c config.yml --vars-env OM` - - Will evaluate to: - ```yaml - value: "123" - ``` - - * `bosh-env` will now set `BOSH_ALL_PROXY` without a trailing slash if one is provided - * When using `bosh-env`, a check is done to ensure the SSH private key exists. - If it does not, the command will exit 1. - * `config-template` will enforce the default value for a property to always be `configurable: false`. - This is in line with the OpsManager behaviour. - -- CVE update to container image. Resolves [USN-4040-1](https://usn.ubuntu.com/4040-1/). - (related to vulnerabilities with `Expat`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4038-1](https://usn.ubuntu.com/4038-1/) and [USN-4038-3](https://usn.ubuntu.com/4038-3/). - (related to vulnerabilities with `bzip`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4019-1](https://usn.ubuntu.com/4019-1/). - (related to vulnerabilities with `SQLite`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [CVE-2019-11477](https://people.canonical.com/~ubuntu-security/cve/2019/CVE-2019-11477.html). - (related to vulnerabilities with `linux-libc-dev`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. 
Resolves [USN-4049-1](https://usn.ubuntu.com/4049-1/). - (related to vulnerabilities with `libglib`. While none of our code directly used these, - they are present on the image.) - -## v3.0.2 -Released July 8, 2019, includes `om` version [1.0.0](https://github.com/pivotal-cf/om/releases/tag/1.0.0) - -### Bug Fixes -- CVE update to container image. Resolves [USN-4014-1](https://usn.ubuntu.com/4014-1/). - (related to vulnerabilities with `GLib`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4015-1](https://usn.ubuntu.com/4015-1/). - (related to vulnerabilities with `DBus`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-3999-1](https://usn.ubuntu.com/3999-1/). - (related to vulnerabilities with `GnuTLS`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4001-1](https://usn.ubuntu.com/4001-1/). - (related to vulnerabilities with `libseccomp`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-4004-1](https://usn.ubuntu.com/4004-1/). - (related to vulnerabilities with `Berkeley DB`. While none of our code directly used these, - they are present on the image.) -- CVE update to container image. Resolves [USN-3993-1](https://usn.ubuntu.com/3993-1/). - (related to vulnerabilities with `curl`. While none of our code directly used these, - they are present on the image.) - -## v3.0.1 -Released May 24, 2019, includes `om` version [1.0.0](https://github.com/pivotal-cf/om/releases/tag/1.0.0) - -### Breaking Changes -- `om` will now follow conventional Semantic Versioning, - with breaking changes in major bumps, - non-breaking changes for minor bumps, - and bug fixes for patches. 
-- The [`credhub-interpolate`][credhub-interpolate] task can have multiple - interpolation paths. The `INTERPOLATION_PATH` param is now plural: `INTERPOLATION_PATHS`. - IF you are using a custom `INTERPOLATION_PATH` for `credhub-interpolate`, you will need to update - your `pipeline.yml` to this new param. - As an example, if your credhub-interpolate job is defined as so: -```yaml -# OLD pipeline.yml PRIOR TO 3.0.0 RELEASE -- name: example-credhub-interpolate - plan: - - get: platform-automation-tasks - - get: platform-automation-image - - get: config - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - input_mapping: - files: config - params: - # all required - CREDHUB_CA_CERT: ((credhub_ca_cert)) - CREDHUB_CLIENT: ((credhub_client)) - CREDHUB_SECRET: ((credhub_secret)) - CREDHUB_SERVER: ((credhub_server)) - PREFIX: /private-foundation - INTERPOLATION_PATH: foundation/config-path - SKIP_MISSING: true -``` - it should now look like -```yaml hl_lines="19" -# NEW pipeline.yml FOR 3.0.0 RELEASE -- name: example-credhub-interpolate - plan: - - get: platform-automation-tasks - - get: platform-automation-image - - get: config - - task: credhub-interpolate - image: platform-automation-image - file: platform-automation-tasks/tasks/credhub-interpolate.yml - input_mapping: - files: config - params: - # all required - CREDHUB_CA_CERT: ((credhub_ca_cert)) - CREDHUB_CLIENT: ((credhub_client)) - CREDHUB_SECRET: ((credhub_secret)) - CREDHUB_SERVER: ((credhub_server)) - PREFIX: /private-foundation - INTERPOLATION_PATHS: foundation/config-path - SKIP_MISSING: true -``` - -- the [`upload-product`][upload-product] option `--sha256` has been changed to `--shasum`. 
- IF you are using the `--config` flag in `upload-product`, your config file will need to update from: -```yaml -# OLD upload-product-config.yml PRIOR TO 3.0.0 RELEASE -product-version: 1.2.3-build.4 -sha256: 6daededd8fb4c341d0cd437a -``` - to: -```yaml hl_lines="3" -# NEW upload-product-config.yml FOR 3.0.0 RELEASE -product-version: 1.2.3-build.4 -shasum: 6daededd8fb4c341d0cd437a # NOTE the name of this value is changed -``` - This change was added to future-proof the param name for when sha256 is no longer the - de facto way of defining shasums. - -### What's New -- The new command [`assign-multi-stemcell`][assign-multi-stemcell] assigns multiple stemcells to a provided product. - This feature is only available in OpsMan 2.6+. -- [`download-product`][download-product] ensures sha sum checking when downloading the file from Tanzu Network. -- [`download-product`][download-product] can now disable ssl validation when connecting to Tanzu Network. - This helps with environments with SSL and proxying issues. - Add `pivnet-disable-ssl: true` in your [download-product-config][download-product-config] to use this feature. -- On [GCP][inputs-outputs-gcp], if you did not assign a public IP, Google would assign - one for you. This has been changed to only assign a public IP if defined in your `opsman.yml`. -- On [Azure][inputs-outputs-azure], if you did not assign a public IP, Azure would assign - one for you. This has been changed to only assign a public IP if defined in your `opsman.yml`. -- `om interpolate` (example in the [test task][test-interpolate]) now supports - the ability to accept partial vars files. This is added support for users who may also be using - credhub-interpolate or who want to mix interpolation methods. To make use of this feature, include - the `--skip-missing` flag. -- [`credhub-interpolate`][credhub-interpolate] now supports the `SKIP_MISSING` - parameter. 
For more information on how to use this feature and if it fits for your foundation(s), see the - [Secrets Handling][secrets-handling-multiple-sources] section. -- the [reference pipeline][reference-pipeline] has been updated to give an example of - [`credhub-interpolate`][credhub-interpolate] in practice. For more information - about credhub, see [Secrets Handling][secrets-handling-multiple-sources] -- `om` now has support for `config-template` (a Platform Automation Toolkit encouraged replacement of - `tile-config-generator`). This is an experimental command that can only be run currently using `docker run`. - For more information and instruction on how to use `config-template`, please see - [Creating a Product Config File][config-template]. -- [`upload-stemcell`][upload-stemcell] now supports the ability to include a config file. - This allows you to define an expected `shasum` that will validate the calculated shasum of the provided - `stemcell` uploaded in the task. This was added to give feature parity with [`upload-product`][upload-product] -- [Azure][inputs-outputs-azure] now allows NSG(network security group) to be optional. - This change was made because NSGs can be assigned at the subnet level rather than just the VM level. This - param is also not required by the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/vm?view=azure-cli-latest). - Platform Automation Toolkit now reflects this. -- [staged-director-config][staged-director-config] now supports returning multiple IaaS - configurations. `iaas-configurations` is a top level key returned in Ops Manager 2.2+. If using an Ops - Manager 2.1 or earlier, `iaas_configuration` will continue to be a key nested under `properties-configuration`. -- [configure-director][configure-director] now supports setting multiple IaaS configurations. - If using this feature, be sure to use the top-level `iaas-configurations` key, rather than the nested - `properties-configuration.iaas_configuration` key. 
If using a single IaaS, `properties-configuration.iaas_configuration` - is still supported, but the new `iaas_configurations` top-level key is recommended. - - ```yaml hl_lines="2" - # Configuration for 2.2+ - iaas-configurations: - - additional_cloud_properties: {} - name: ((iaas-configurations_0_name)) - - additional_cloud_properties: {} - name: ((iaas-configurations_1_name)) - ... - networks-configuration: ... - properties-configuration: ... - ``` - - ```yaml hl_lines="5" - # Configuration 2.1 and earlier - networks-configuration: ... - properties-configuration: - director_configuration: ... - iaas_configuration: - additional_cloud_properties: {} - name: ((iaas-configurations_0_name)) - ... - security_configuration: ... - ``` - -### Bug Fixes -- OpenStack would sometimes be unable to associate the public IP when creating the VM, because it was - waiting for the VM to come up. The `--wait` flag has been added to validate that the VM creation is - complete before more work is done to the VM. -- [`credhub-interpolate`][credhub-interpolate] now accepts multiple files for the `INTERPOLATION_PATHS`. -- CVE update to container image. Resolves [USN-3911-1](https://usn.ubuntu.com/3911-1/). - (related to vulnerabilities with `libmagic1`. While none of our code directly used these, - they are present on the image.) -- Improved error messaging for [vSphere][inputs-outputs-vsphere] VM creation if neither `ssh-password` or `ssh-public-key` are set. - One or the other is required to create a VM. - -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} diff --git a/docs/report-an-issue.md b/docs/report-an-issue.md deleted file mode 100644 index dea30224..00000000 --- a/docs/report-an-issue.md +++ /dev/null @@ -1,4 +0,0 @@ -To report an issue, reach out to your primary VMware contact or [VMware Support][support]. 
- -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} diff --git a/docs/tasks.html.md.erb b/docs/tasks.html.md.erb new file mode 100644 index 00000000..7b2df368 --- /dev/null +++ b/docs/tasks.html.md.erb @@ -0,0 +1,1782 @@ +# Platform Automation Toolkit Tasks + + +This document lists each Platform Automation Toolkit task, +and provides information about their intentions, inputs, and outputs. + +The tasks are presented, in their entirety, +as they are found in the product. + +## Task types + +The Docker image can be used to invoke the commands in each task locally. +Use `--help` for more information. +To learn more see [Running commands locally](./how-to-guides/running-commands-locally.html). + +

+The inputs, outputs, params, filename, and filepath +of this task file are part of its semantically versioned API. +See our documentation for a detailed discussion of our semver API. +See www.semver.org for an explanation of semantic versioning. +

+ +### activate-certificate-authority + +Ensures that the newest certificate authority on Tanzu Operations Manager is active. + +**Task** + +<%= partial "tasks/activate-certificate-authority" %> + +**Implementation** + +<%= partial "tasks/activate-certificate-authority-script" %> + +**Usage** + +```yaml +- task: activate-new-ca + image: platform-automation-image + file: platform-automation-tasks/tasks/activate-certificate-authority.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### apply-changes + +Triggers an install on the Tanzu Operations Manager described by the auth file. + +To optionally provide an errand file to manually control errands +for a particular of run of `apply-changes`. +To see an example of this config file, see [Inputs and outputs](./inputs-outputs.html). + +<%= partial "disable-verifiers" %> + +**Task** + +<%= partial "tasks/apply-changes" %> + +**Implementation** + +<%= partial "tasks/apply-changes-script" %> + +**Usage** + +```yaml +- task: apply-product-changes + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### apply-director-changes + +`apply-changes` can also be used to trigger an install for just the BOSH Director +with the `--skip-deploy-products`/`-sdp` flag. + +<%= partial "disable-verifiers" %> + +**Task** + +<%= partial "tasks/apply-director-changes" %> + +**Implementation** + +<%= partial "tasks/apply-director-changes-script" %> + +**Usage** + +```yaml +- task: apply-product-changes + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/apply-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### assign-multi-stemcell + +`assign-multi-stemcell` assigns multiple stemcells to a provided product. 
+For more information about how to utilize this workflow, +see [Stemcell Handling](./concepts/stemcell-handling.html). + +**Task** + +<%= partial "tasks/assign-multi-stemcell" %> + +**Implementation** + +<%= partial "tasks/assign-multi-stemcell-script" %> + +**Usage** + +```yaml +- task: assign-multi-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/assign-multi-stemcell.yml + params: + ENV_FILE: ((foundation))/env/env.yml +``` + +### assign-stemcell + +`assign-stemcell` assigns a stemcell to a provided product. +For more information about how to use +this workflow, see [Stemcell Handling](./concepts/stemcell-handling.html). + +**Task** + +<%= partial "tasks/assign-stemcell" %> + +**Implementation** + +<%= partial "tasks/assign-stemcell-script" %> + +**Usage** + +```yaml +- task: assign-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/assign-stemcell.yml + params: + ENV_FILE: ((foundation))/env/env.yml +``` + +### backup-director + +Use BBR to backup a BOSH Director deployed with Tanzu Operations Manager. + +**Task** + +<%= partial "tasks/backup-director" %> + +**Implementation** + +<%= partial "tasks/backup-director-script" %> + +**Usage** + +```yaml +- task: backup-director + image: platform-automation-image + file: platform-automation-tasks/tasks/backup-director.yml + params: + OPSMAN_SSH_PRIVATE_KEY: ((vsphere_private_ssh_key)) +``` + +### backup-product + +Use BBR to backup a product deployed with Tanzu Operations Manager. 
+ +**Task** + +<%= partial "tasks/backup-product" %> + +**Implementation** + +<%= partial "tasks/backup-product-script" %> + +**Usage** + +```yaml +- task: backup-product + image: platform-automation-image + file: platform-automation-tasks/tasks/backup-product.yml + params: + PRODUCT_NAME: cf + ENV_FILE: env.yml + OPSMAN_SSH_PRIVATE_KEY: ((opsman-ssh-private-key)) +``` + +### backup-tkgi + +Use BBR to backup Tanzu Kubernetes Grid Integrated Edition (TKGI) +deployed with Tanzu Operations Manager. + +

+PKS CLI may be temporarily unavailable: +During the backup, the PKS CLI is disabled. +Due to the nature of the backup, some commands may not work as expected.

+ +
+Known issue: +When using the task backup-tkgi behind a proxy, +the values for no_proxy can affect the SSH (through jumpbox) tunneling. +When the task invokes the bbr CLI, the BOSH_ALL_PROXY environment variable is set; +this environment variable tries to honor the no_proxy settings. +The task's usage of the SSH tunnel requires that no_proxy is not set.
+If you experience an error, such as an SSH connection refused or connection timeout, +try setting no_proxy: "" as a param on the task.
+For example, + +
+
+- task: backup-tkgi
+    file: platform-automation-tasks/tasks/backup-tkgi.yml
+    params:
+    no_proxy: ""
+
+
+
+ +**Task** + +<%= partial "tasks/backup-tkgi" %> + +**Implementation** + +<%= partial "tasks/backup-tkgi-script" %> + +**Usage** + +```yaml +- task: backup-tkgi + image: platform-automation-image + file: platform-automation-tasks/tasks/backup-tkgi.yml + params: + ENV_FILE: env.yml + OPSMAN_SSH_PRIVATE_KEY: ((opsman-ssh-private-key)) +``` + +### check-pending-changes + +Returns a table of the current state of your Tanzu Operations Manager +and lists whether each product is changed or unchanged and the errands for that product. +By default, `ALLOW_PENDING_CHANGES: false` will force the task to fail. +This is useful to keep manual changes from being accidentally applied +when automating the [configure-product](#configure-product)/[apply-changes](#apply-changes) of other products. + +**Task** + +<%= partial "tasks/check-pending-changes" %> + +**Implementation** + +<%= partial "tasks/check-pending-changes-script" %> + +**Usage** + +```yaml +- task: check-pending-changes + image: platform-automation-image + file: platform-automation-tasks/tasks/check-pending-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + ALLOW_PENDING_CHANGES: true +``` + +### collect-telemetry + +Collects foundation information +using the [Tanzu Telemetry for Tanzu Operations Manager](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform-services/telemetry-for-tanzu/2-2/telemetry-tanzu/index.html) tool. + +This task requires the `telemetry-collector-binary` as an input. +The binary is available on the [Broadcom Support portal](https://support.broadcom.com/group/ecx/productdownloads?subfamily=Telemetry%20for%20VMware%20Tanzu%20%5BVMs%5D); +you must define a `resource` to supply the binary. + +This task requires a [config file](./inputs-outputs.html#telemetry). + +After using this task, use +the [send-telemetry](#send-telemetry) task +to send telemetry data to VMware. 
+ +**Task** + +<%= partial "tasks/collect-telemetry" %> + +**Implementation** + +<%= partial "tasks/collect-telemetry-script" %> + +**Usage** + +```yaml +- task: collect-telemetry-data + image: platform-automation-image + file: platform-automation-tasks/tasks/collect-telemetry.yml + input_mapping: + env: configuration + config: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/telemetry.yml + ENV_FILE: foundations/config/env.yml +``` + +### configure-authentication + +Configures Tanzu Operations Manager with an internal userstore and admin user account. +See [configure-saml-authentication](#configure-saml-authentication) to configure an external SAML user store, +and [configure-ldap-authentication](#configure-ldap-authentication) to configure with LDAP. + +**Task** + +<%= partial "../tasks/configure-authentication" %> + +**Implementation** + +<%= partial "../tasks/configure-authentication-script" %> + +**Usage** + +```yaml +- task: configure-authentication + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-authentication.yml + attempts: 10 + input_mapping: + env: configuration + config: configuration + params: + ENV_FILE: foundations/config/env.yml + AUTH_CONFIG_FILE: foundations/config/auth.yml +``` + +For details on the config file expected in the `config` input, +see [Generating an Auth file](./how-to-guides/configuring-auth.html). + +### configure-director + +Configures the BOSH Director with settings from a config file. +See [staged-director-config](#staged-director-config), +which can extract a config file. + +**Task** + +<%= partial "../tasks/configure-director" %> + +**Implementation** + +<%= partial "../tasks/configure-director-script" %> + +**Usage** + +```yaml +- task: configure-director + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-director.yml + input_mapping: + config: configuration + env: configuration +``` + +

+GCP with service account: +For GCP, if a service account is used, the associated_service_account property must be set explicitly in the iaas_configuration section.

+ +### configure-ldap-authentication + +Configures Tanzu Operations Manager with an external LDAP user store and admin user account. +See [configure-authentication](#configure-authentication) to configure an internal user store, +and [configure-saml-authentication](#configure-saml-authentication) to configure with SAML. + +**Task** + +<%= partial "../tasks/configure-ldap-authentication" %> + +**Implementation** + +<%= partial "../tasks/configure-ldap-authentication-script" %> + +**Usage** + +```yaml +- task: configure-ldap-authentication + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-ldap-authentication.yml + params: + ENV_FILE: ((foundation))/env/env.yml + AUTH_CONFIG_FILE: ((foundation))/auth/auth.yml +``` + +For more details about using LDAP, +see [Tanzu Operations Manager documentation](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/pcf-interface.html#ldap). + +For details about the config file expected in the `config` input, +please see [Generating an Auth file](./how-to-guides/configuring-auth.html). + +### configure-opsman + +This task supports configuring settings +on the Tanzu Operations Manager Settings page in the UI. +For example, the SSL cert for the Tanzu Operations Manager VM can be configured. + +Configuration can be added directly to [`opsman.yml`](./inputs-outputs.html#tanzu-operations-manager-config). +An example of all configurable properties can be found in the "Additional Settings" tab. + +The [`upgrade-opsman`](#upgrade-opsman) task will automatically call `configure-opsman`, +so there is no need to use this task then. +It is recommended to use this task in the initial setup of the Tanzu Operations Manager VM. 
+ +**Task** + +<%= partial "../tasks/configure-opsman" %> + +**Implementation** + +<%= partial "../tasks/configure-opsman-script" %> + +**Usage** + +```yaml +- task: configure-opsman + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-opsman.yml + params: + ENV_FILE: ((foundation))/env/env.yml + OPSMAN_CONFIG_FILE: ((foundation))/config/opsman.yml +``` + +### configure-product + +Configures an individual, staged product with settings from a config file. + +Not to be confused with Tanzu Operations Manager's +built-in [import](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-backup-restore-restore-pcf-bbr.html#deploy-import), +which reads all deployed products and configurations from a single opaque file, +intended for import as part of backup/restore and upgrade lifecycle processes. + +See [staged-config](#staged-config), +which can extract a config file, +and [upload-and-stage-product](#upload-and-stage-product), +which can stage a product that's been uploaded. + +**Task** + +<%= partial "../tasks/configure-product" %> + +**Implementation** + +<%= partial "../tasks/configure-product-script" %> + +**Usage** + +```yaml +- task: configure-pks + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-product.yml + input_mapping: + config: configuration + env: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/pks.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + vars/foundations/((foundation))/vars/pks.yml +``` + +### configure-new-certificate-authority + +

Creates a new certificate authority on Tanzu Operations Manager. This can either create a +new CA using CredHub or create a new CA using a provided certificate and +private key in PEM format via the certs/ input.

+ +**Task** + +<%= partial "../tasks/configure-new-certificate-authority" %> + +**Implementation** + + + +```bash +cat /var/version && echo "" +set -eux + +if [[ -f "certs/certificate.pem" && -f "certs/privatekey.pem" ]]; then + om --env env/"${ENV_FILE}" create-certificate-authority \ + --format json \ + --certificate-pem "$(configure-saml-authentication + +Configures Tanzu Operations Manager with an external SAML user store and admin user account. +See [configure-authentication](#configure-authentication) to configure an internal user store, +and [configure-ldap-authentication](#configure-ldap-authentication) to configure with LDAP. + +**Task** + +<%= partial "../tasks/configure-saml-authentication" %> + +**Implementation** + +<%= partial "../tasks/configure-saml-authentication-script" %> + +**Usage** + +```yaml +- task: configure-saml-authentication + image: platform-automation-image + file: platform-automation-tasks/tasks/configure-saml-authentication.yml + params: + ENV_FILE: ((foundation))/env/env.yml + AUTH_CONFIG_FILE: ((foundation))/auth/auth.yml +``` + +
+BOSH admin client: +By default, this task creates a BOSH admin client. +This is helpful for some advanced workflows +that involve communicating directly with the BOSH Director. +It is possible to disable this behavior; see the +config file documentation +for details. +
+ +Configuring SAML has two different auth flows for the UI and the task. +The UI will have a browser based login flow. +The CLI will require `client-id` and `client-secret` as it cannot do a browser login flow. + +For more details on using SAML, +see [Tanzu Operations Manager documentation](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/opsguide-config-rbac.html#enable-saml). + +For details on the config file expected in the `config` input, +please see [Generating an Auth file](./how-to-guides/configuring-auth.html). + +### create-vm + +Creates an unconfigured Tanzu Operations Manager VM. + +**Task** + +<%= partial "../tasks/create-vm" %> + +**Implementation** + +<%= partial "../tasks/create-vm-script" %> + +**Usage** + +```yaml +- task: create-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/create-vm.yml + input_mapping: + image: opsman-image + config: configuration + vars: configuration + params: + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + STATE_FILE: state-((foundation)).yml + VARS_FILES: vars/foundations/((foundation))/vars/director.yml + ensure: &put-state + do: + - put: state + params: + file: generated-state/state-((foundation)).yml +``` + +This task requires a config file specific to the IaaS being deployed to. +See the [configuration](./inputs-outputs.html#tanzu-operations-manager-config) page for more specific examples. + +The task does specific CLI commands for the creation of the Tanzu Operations Manager VM on each IAAS. See the following for more information: + +**AWS** + +1. Requires the image YAML file from the Broadcom Support portal +2. Validates the existence of the VM if defined in the statefile, if so do nothing +3. Chooses the correct AMI to use based on the provided image YAML file from the Broadcom Support portal +4. Creates the VM configured using the opsman config and the image YAML. 
This only attaches existing infrastructure to a newly created VM. This does not create any new resources +5. The public IP address, if provided, is assigned after successful creation + +**Azure** + +1. Requires the image YAML file from the Broadcom Support portal +1. Validates the existence of the VM if defined in the statefile, if so do nothing +1. Copies the image (of the OpsMan VM from the specified region) as a blob into the specified storage account +1. Creates the Tanzu Operations Manager image +1. Creates a VM from the image. This will use unmanaged disk (if specified), and assign a public and/or private IP. This only attaches existing infrastructure to a newly created VM. This does not create any new resources. + +**GCP** + +1. Requires the image YAML file from the Broadcom Support portal +1. Validates the existence of the VM if defined in the statefile, if so do nothing +1. Creates a compute image based on the region specific Tanzu Operations Manager source URI in the specified Tanzu Operations Manager account +1. Creates a VM from the image. This will assign a public and/or private IP address, VM sizing, and tags. This does not create any new resources. + +**OpenStack** + +1. Requires the image YAML file from the Broadcom Support portal +1. Validates the existence of the VM if defined in the statefile; if so do nothing +1. Recreates the image in OpenStack, if it already exists, to validate that you are using the correct version of the image +1. Creates a VM from the image. This does not create any new resources +1. The public IP address, if provided, is assigned after successful creation + +**vSphere** + +1. Requires the OVA image from the Broadcom Support portal +1. Validates the existence of the VM if defined in the statefile, if so do nothing +1. Builds the ipath from the provided datacenter, folder, and VM name (vmname) provided in the config file. The created VM is stored on the generated path. 
If folder is not provided, the VM will be placed in the datacenter. +1. Creates a VM from the image provided to the `create-vm` command. This does not create any new resources + +### credhub-interpolate + +Interpolate CredHub entries into configuration files. + +

+Deprecation Notice: +The credhub-interpolate task will be deprecated in future major versions of Platform Automation Toolkit.

+ +

+The prepare-tasks-with-secrets task replaces the credhub-interpolate task on Concourse versions 5.x+ +and provides additional benefits.

+ +**Task** + +<%= partial "../tasks/credhub-interpolate" %> + +**Implementation** + +<%= partial "../tasks/credhub-interpolate-script" %> + +**Usage** + +```yaml +- task: interpolate-env-creds + image: platform-automation-image + file: platform-automation-tasks/tasks/credhub-interpolate.yml + params: + CREDHUB_CLIENT: ((credhub-client)) + CREDHUB_SECRET: ((credhub-secret)) + CREDHUB_SERVER: ((credhub-server)) + PREFIX: '/pipeline/vsphere' + INTERPOLATION_PATHS: ((foundation))/config + SKIP_MISSING: true + input_mapping: + files: configuration + output_mapping: + interpolated-files: interpolated-configs +``` + +This task requires a valid credhub with UAA client and secret. For information about how to +set this up, see [Using a secrets store to store credentials](./concepts/secrets-handling.html). + +### delete-certificate-authority + +Deletes all inactive certificate authorities from the Tanzu Operations Manager. + +**Task** + +<%= partial "../tasks/delete-certificate-authority" %> + +**Implementation** + +<%= partial "../tasks/delete-certificate-authority-script" %> + +**Usage** + +```yaml +- task: delete-certificate-authority + image: platform-automation-image + file: platform-automation-tasks/tasks/delete-certificate-authority.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### delete-installation + +Deletes the Tanzu Operations Manager installation. + +**Task** + +<%= partial "../tasks/delete-installation" %> + +**Implementation** + +<%= partial "../tasks/delete-installation-script" %> + +**Usage** + +```yaml +- task: delete-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/delete-installation.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### delete-vm + +Deletes the Tanzu Operations Manager VM instantiated by [create-vm](#create-vm). 
+ +**Task** + +<%= partial "../tasks/delete-vm" %> + +**Implementation** + +<%= partial "../tasks/delete-vm-script" %> + +**Usage** + +```yaml +- task: delete-vm + image: platform-automation-image + file: platform-automation-tasks/tasks/delete-vm.yml + input_mapping: + config: configuration + params: + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + STATE_FILE: state-((foundation)).yml + ensure: + do: + - put: state + params: + file: generated-state/state-((foundation)).yml +``` + +This task requires the [state file](./inputs-outputs.html#state) generated [create-vm](#create-vm). + +The task does specific CLI commands for the deletion of the Tanzu Operations Manager VM and resources on each IAAS. See the following for more information: + +**AWS** + +1. Deletes the VM + +**Azure** + +1. Deletes the VM +1. Attempts to delete the associated disk +1. Attempts to delete the associated nic +1. Attempts to delete the associated image + +**GCP** + +1. Deletes the VM +1. Attempts to delete the associated image + +**OpenStack** + +1. Deletes the VM +1. Attempts to delete the associated image + +**vSphere** + +1. Deletes the VM + +### download-and-upload-product + +This is an _advanced task_. +If a product (and its associated stemcell) has already been uploaded to Tanzu Operations Manager +then it will not re-download and upload. +This is helpful when looking for a fast-feedback loop for building pipelines. + +This task is similar to [`download-product`](#download-product), +as it takes the same product config. +There are no `outputs` for this task +because the products (and stemcell) don't need to be shared downstream. + +

+This currently works only when the product source is the Broadcom Support portal.

+ +**Task** + +<%= partial "../tasks/download-and-upload-product" %> + +**Implementation** + +<%= partial "../tasks/download-and-upload-product-script" %> + +**Usage** + +```yaml +- task: download-and-upload-pas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-and-upload-product.yml + input_mapping: + env: configuration + config: configuration + params: + ENV_FILE: foundations/config/env.yml + CONFIG_FILE: download-product-pivnet/download-tas.yml +``` + +### download-product + +Downloads a product specified in a config file from the Broadcom Support portal (`pivnet`), S3 (`s3`), GCS (`gcs`), or Azure (`azure`). +Optionally, also downloads the latest stemcell for that product. + +Downloads are cached, so files are not re-downloaded each time. +When downloading from the Broadcom Support portal, +the cached file is verified +using the Broadcom Support portal checksum +to validate the integrity of that file. +If the verification fails, the file is re-downloaded. +When downloading from a supported blobstore +the cached file is not verified, +as there is no checksum from those blobstore APIs to use. + +Outputs can be persisted to any supported blobstore using a `put` to an appropriate resource +for later use with download-product using the `SOURCE` parameter, +or used directly as inputs to [upload-and-stage-product](#upload-and-stage-product) +and [upload-stemcell](#upload-stemcell) tasks. + +This task requires a [download-product config file](./inputs-outputs.html#download-product-config). + +If stemcell-iaas is specified in the download-product config file, +and the specified product is a `.pivotal` file, +`download-product` will attempt to download the stemcell for the product. +It will retrieve the latest compatible stemcell for the specified IaaS. 
+The valid IaaSs are: + +- `aws` +- `azure` +- `google` +- `openstack` +- `vsphere` + +If a configuration for S3, GCS, or Azure is present in the [download-product config file](./inputs-outputs.html#download-product-config), +the slug and version of the downloaded product file will be prepended in brackets to the filename. +For example: + +- original-pivnet-filenames: + ``` + ops-manager-aws-2.5.0-build.123.yml + cf-2.5.0-build.45.pivotal + ``` + +- download-product-filenames if blobstore configuration is present: + ``` + [ops-manager,2.5.0]ops-manager-aws-2.5.0-build.123.yml + [elastic-runtime,2.5.0]cf-2.5.0-build.45.pivotal + ``` + +This is to allow the same config parameters +that let us select a file from the Broadcom Support portal +select it again when pulling from the supported blobstore. +Note that the filename will be unchanged +if supported blobstore keys are not present in the configuration file. +This avoids breaking current pipelines. + +

+When using the s3 resource in Concourse: +If you are using a regexp in your s3 resource definition +that explicitly requires the Broadcom Support portal filename +to be the start of the regex (that is, the pattern starts with ^), +this won't work when using S3 config. +The new file format preserves the original filename, +so it is still possible to match on that, +but if you need to match from the beginning of the filename, +that will have been replaced by the prefix described earlier.

+ +

+When specifying Tanzu Platform for Cloud Foundry [Windows]: +This task will automatically download and inject the winfs for pas-windows.

+ +

+This task cannot download the stemcell for pas-windows on vSphere. +To build this stemcell manually, see +Creating a vSphere Windows stemcell using stembuild +in the VMware Tanzu Platform for Cloud Foundry documentation.

+ +

+When the download product config only has the Broadcom Support portal credentials, +it will not add the prefix to the downloaded product. +For example, example-product.pivotal from the Broadcom Support portal will be displayed in the output +as example-product.pivotal.

+ +**Task** + +<%= partial "../tasks/download-product" %> + +**Implementation** + +<%= partial "../tasks/download-product-script" %> + +**Broadcom Support Portal Usage** (formerly **Tanzu Network**) + +```yaml +- name: fetch-pks + plan: + - in_parallel: + - get: daily + trigger: true + - get: platform-automation-image + params: + unpack: true + - get: platform-automation-tasks + params: + unpack: true + - get: configuration + - task: prepare-tasks-with-secrets + <<: *prepare-tasks-with-secrets + - task: download-pks-product-and-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product-pivnet/download-pks.yml + input_mapping: + config: configuration + output_mapping: {downloaded-stemcell: pks-stemcell} + - in_parallel: + - put: pks-product + params: + file: downloaded-product/*.pivotal + - put: pks-stemcell + params: + file: pks-stemcell/*.tgz +``` + +**S3 Usage** + +```yaml +- task: download-pks + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + input_mapping: + config: configuration + vars: configuration + params: + CONFIG_FILE: foundations/((foundation))/config/download-pks.yml + VARS_FILES: vars/foundations/((foundation))/vars/versions.yml + SOURCE: s3 + output_mapping: + downloaded-product: pks-product + downloaded-stemcell: pks-stemcell +``` + +**GCS Usage** + +```yaml +- task: download-pas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product/pas.yml + SOURCE: gcs + input_mapping: + config: configuration + output_mapping: + downloaded-product: pas-product + downloaded-stemcell: pas-stemcell +``` + +**Azure Usage** + +```yaml +- task: download-pas + image: platform-automation-image + file: platform-automation-tasks/tasks/download-product.yml + params: + CONFIG_FILE: download-product/pas.yml + SOURCE: azure + input_mapping: + config: 
configuration + output_mapping: + downloaded-product: pas-product + downloaded-stemcell: pas-stemcell +``` + +### expiring-certificates + +Returns a list of certificates that are expiring within a specified time frame. +These certificates can be Tanzu Operations Manager or CredHub certificates. +This is a purely informational task. + +**Task** + +<%= partial "../tasks/expiring-certificates" %> + +**Implementation** + +<%= partial "../tasks/expiring-certificates-script" %> + +**Usage** + +```yaml +- task: expiring-certificates + image: platform-automation-image + file: platform-automation-tasks/tasks/expiring-certificates.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + EXPIRES_WITHIN: 2m +``` + +### export-installation + +Exports an existing Tanzu Operations Manager to a file. + +This is the first part of the backup/restore and upgrade lifecycle processes. +This task is used on a fully installed and healthy Tanzu Operations Manager to export +settings to an upgraded version of Tanzu Operations Manager. + +To use with non-versioned blobstore, you can override `INSTALLATION_FILE` param +to include `$timestamp`, then the generated installation file will include a sortable +timestamp in the filename. + +example: +```yaml +params: + INSTALLATION_FILE: installation-$timestamp.zip +``` + +

+The timestamp is generated using the time on Concourse worker. +If the time is different on different workers, the generated timestamp may fail to sort correctly. +Changing the time or timezone on workers might interfere with ordering.

+ +**Task** + +<%= partial "../tasks/export-installation" %> + +**Implementation** + +<%= partial "../tasks/export-installation-script" %> + +**Usage** + +```yaml +- task: export-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/export-installation.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml + INSTALLATION_FILE: ((foundation))-installation-$timestamp.zip +``` + +<%= partial "./export_installation_note" %> + +### generate-certificate + +Generate a certificate, signed by the active Tanzu Operations Manager certificate authority, +for the domains specified in the `DOMAINS` environment variable. + +This task outputs `certificate`, containing `certificate.pem` and +`privatekey.pem` for the new certificate. + +**Task** + +<%= partial "../tasks/generate-certificate" %> + +**Implementation** + +<%= partial "../tasks/generate-certificate-script" %> + + + +### import-installation + +Imports a previously exported installation to Tanzu Operations Manager. + +This is a part of the backup/restore and upgrade lifecycle processes. +This task is used after an installation has been exported and a new Tanzu Operations Manager +has been deployed, but before the new Tanzu Operations Manager is configured. + +**Task** + +<%= partial "../tasks/import-installation" %> + +**Implementation** + +<%= partial "../tasks/import-installation-script" %> + +**Usage** + +```yaml +- task: import-installation + image: platform-automation-image + file: platform-automation-tasks/tasks/import-installation.yml + input_mapping: + env: configuration + params: + ENV_FILE: ((foundation))/env/env.yml + INSTALLATION_FILE: installation-*.zip +``` + +### make-git-commit + +Copies a single file into a repo and makes a commit. 
+This is useful for persisting the state output of tasks that manage the VM, such as: + +- [create-vm](#create-vm) +- [upgrade-opsman](#upgrade-opsman) +- [delete-vm](#delete-vm) + +It is also useful for persisting the configuration output from: + +- [staged-config](#staged-config) +- [staged-director-config](#staged-director-config) + +

+This commits all changes present +in the repo used for the repository input, +in addition to copying in a single file.

+ +

+This does not perform a git push. +You must put the output of this task to a git resource to persist it.

+ +**Task** + +<%= partial "../tasks/make-git-commit" %> + +**Implementation** + +<%= partial "../tasks/make-git-commit-script" %> + +**Usage** + +```yaml +- task: make-commit + image: platform-automation-image + file: platform-automation-tasks/tasks/make-git-commit.yml + input_mapping: + repository: configuration + file-source: generated-state + output_mapping: + repository-commit: configuration-commit + params: + FILE_SOURCE_PATH: state.yml + FILE_DESTINATION_PATH: state/state.yml + GIT_AUTHOR_EMAIL: "pcf-pipeline-bot@example.com" + GIT_AUTHOR_NAME: "Platform Automation Bot" + COMMIT_MESSAGE: 'Update state file' +``` + +### pre-deploy-check + +Checks if the Tanzu Operations Manager director is configured properly and validates the configuration. +This feature is only available in Tanzu Operations Manager 2.6+. +Additionally, checks each of the staged products +and validates they are configured correctly. +This task can be run at any time +and can be used as a pre-check for [`apply-changes`](#apply-changes). + +The checks that this task executes are: + +- is configuration complete and valid +- is the network assigned +- is the availability zone assigned +- is the stemcell assigned +- what stemcell type/version is required +- are there any unset/invalid properties +- did any Tanzu Operations Manager verifiers fail + +If any of the above checks fail +the task will fail. +The failed task will provide a list of errors that need to be fixed +before an `apply-changes` operation can start. + +**Task** + +<%= partial "../tasks/pre-deploy-check" %> + +**Implementation** + +<%= partial "../tasks/pre-deploy-check-script" %> + +**Usage** + +```yaml +- task: pre-deploy-check + image: platform-automation-image + file: platform-automation-tasks/tasks/pre-deploy-check.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### prepare-image + +This task modifies the container image with runtime dependencies. 
+`CA_CERTS` can be added, +which can help secure HTTP connections with a proxy server +and allows the use of a custom CA on the Tanzu Operations Manager. + +

+Concourse 5+ only: +This task uses a Concourse feature +that allows inputs and outputs to have the same name. +This feature is only available in Concourse v5+. +prepare-image does not work with Concourse v4.

+ +**Task** + +<%= partial "../tasks/prepare-image" %> + +**Implementation** + +<%= partial "../tasks/prepare-image-script" %> + +**Usage** + +```yaml +prepare-image: &prepare-image + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-image.yml + params: + CA_CERTS: ((opsman-ssl.ca)) +``` + +### prepare-tasks-with-secrets + +Modifies task files to include variables needed for config files as environment variables +for run-time interpolation from a secret store. +See [Using a secrets store to store credentials](./concepts/secrets-handling.html). + +

+Concourse 5+ only: +This task uses a Concourse feature +that allows inputs and outputs to have the same name. +This feature is only available in Concourse v5+. +prepare-tasks-with-secrets does not work with Concourse v4.

+ +**Task** + +<%= partial "../tasks/prepare-tasks-with-secrets" %> + +**Implementation** + +<%= partial "../tasks/prepare-tasks-with-secrets-script" %> + +**Usage** + +```yaml +# This task is used in multiple jobs +# The YAML anchor "*prepare-tasks-with-secrets" is used in its place +prepare-tasks-with-secrets: &prepare-tasks-with-secrets + image: platform-automation-image + file: platform-automation-tasks/tasks/prepare-tasks-with-secrets.yml + input_mapping: + tasks: platform-automation-tasks + config: configuration + vars: configuration + params: + CONFIG_PATHS: config/foundations/config config/foundations/((foundation))/config + VARS_PATHS: vars/foundations/((foundation))/vars + output_mapping: + tasks: platform-automation-tasks +``` + +### regenerate-certificates + +Regenerates all non-configurable leaf certificates managed by Tanzu Operations Manager using +the active certificate authority. + +**Task** + +<%= partial "../tasks/regenerate-certificates" %> + +**Implementation** + +<%= partial "../tasks/regenerate-certificates-script" %> + +**Usage** + +```yaml +- task: regenerate-certificates + image: platform-automation-image + file: platform-automation-tasks/tasks/regenerate-certificates.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### replicate-product + +This replicates the product for use in isolation segments. +The task requires a downloaded product prior to replication. +The output is a replicated tile with a new name in the metadata and filename. + +

+replicate-product does not support storing the replicated product +in a non-versioned blobstore, because it cannot generate a unique name. +It is recommended to use the replicated tile immediately in the next task +rather than storing it and using it in a different job.

+ +**Task** + +<%= partial "../tasks/replicate-product" %> + +**Implementation** + +<%= partial "../tasks/replicate-product-script" %> + +**Usage** + +```yaml +- task: replicate-product + image: platform-automation-image + file: platform-automation-tasks/tasks/replicate-product.yml + input_mapping: + product: pas-windows + params: + REPLICATED_NAME: iso-1 +``` + +### revert-staged-changes + +Reverts all changes that are currently staged on the Tanzu Operations Manager. + +

+Since revert-staged-changes reverts all changes on a Tanzu Operations Manager, +it can conflict with tasks that perform stage or configure operations. +Use passed constraints to ensure things run in the order you mean them to.

+ +**Task** + +<%= partial "../tasks/revert-staged-changes" %> + +**Implementation** + +<%= partial "../tasks/revert-staged-changes-script" %> + +**Usage** + +```yaml +- task: revert-staged-changes + image: platform-automation-image + file: platform-automation-tasks/tasks/revert-staged-changes.yml + input_mapping: + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### run-bosh-errand + +Runs a specified BOSH errand directly on the BOSH Director +by tunneling through Tanzu Operations Manager. + +

+Tanzu Operations Manager is the main interface for interacting with BOSH, +and it has no way of knowing what is happening to the BOSH Director +outside of the Tanzu Operations Manager UI context. +By using this task, you are accepting the risk +that what you are doing cannot be tracked by your Tanzu Operations Manager.

+ +

+Tanzu Operations Manager, by design, will re-run failed errands for you. +As this task interacts with BOSH directly, +your errand will not be re-run if it fails. +To replicate this retry behavior in your pipeline, use +the attempts +Concourse feature to run the task more than once.

+ +**Task** + +<%= partial "../tasks/run-bosh-errand" %> + +**Implementation** + +<%= partial "../tasks/run-bosh-errand-script" %> + +**Usage** + +```yaml +- task: run-bosh-errand + image: platform-automation-image + file: platform-automation-tasks/tasks/run-bosh-errand.yml + input_mapping: + env: configuration + params: + PRODUCT_NAME: cf + ERRAND_NAME: smoke_tests + ENV_FILE: foundations/config/env.yml + OPSMAN_SSH_PRIVATE_KEY: ((ops_manager_ssh_private_key)) +``` + +### send-telemetry + +Sends the `.tar` output from [`collect-telemetry`](#collect-telemetry) +to VMware. + +

+To use the send-telemetry, +you must acquire a license key. +Contact your VMware representative.

+ +**Task** + +<%= partial "../tasks/send-telemetry" %> + +**Implementation** + +<%= partial "../tasks/send-telemetry-script" %> + +**Usage** + +```yaml +- task: send-telemetry-data + attempts: 3 + image: platform-automation-image + file: platform-automation-tasks/tasks/send-telemetry.yml + params: + API_KEY: no-op-test-key + DATA_FILE_PATH: collected-telemetry-data/FoundationDetails*.tar +``` + +### stage-configure-apply + +This is an _advanced task_. +Stage a product to Tanzu Operations Manager, configure that product, and apply changes +only to that product without applying changes to the rest of the foundation. + +<%= partial "disable-verifiers" %> + +**Task** + +<%= partial "../tasks/stage-configure-apply" %> + +**Implementation** + +<%= partial "../tasks/stage-configure-apply-script" %> + +**Usage** + +```yaml +- task: stage-configure-apply + image: platform-automation-image + file: platform-automation-tasks/tasks/stage-configure-apply.yml + attempts: 3 + params: + CONFIG_FILE: foundations/((foundation))/config/p-telemetry.yml + STAGE_PRODUCT_CONFIG_FILE: foundations/((foundation))/config/p-telemetry.yml + ENV_FILE: foundations/config/env.yml + VARS_FILES: | + vars/foundations/((foundation))/vars/director.yml + input_mapping: + env: configuration + config: configuration + vars: configuration +``` + +### stage-product +Stages a product to the Tanzu Operations Manager specified in the config file. + +**Task** + +<%= partial "../tasks/stage-product" %> + +**Implementation** + +<%= partial "../tasks/stage-product-script" %> + +**Usage** + +```yaml +- task: upload-and-stage-tas + image: platform-automation-image + file: platform-automation-tasks/tasks/stage-product.yml + input_mapping: + product: tas-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### staged-config + +Downloads the configuration for a product from Tanzu Operations Manager. 
+ +This is not to be confused with Tanzu Operations Manager's +built-in [export](https://techdocs.broadcom.com/us/en/vmware-tanzu/platform/tanzu-operations-manager/3-0/tanzu-ops-manager/install-backup-restore-backup-pcf-bbr.html#export), +which puts all deployed products and configurations into a single file, +intended for import as part of backup/restore and upgrade lifecycle processes. + +**Task** + +<%= partial "../tasks/staged-config" %> + +**Implementation** + +<%= partial "../tasks/staged-config-script" %> + +**Usage** + +```yaml +- task: staged-config + image: platform-automation-image + file: platform-automation-tasks/tasks/staged-config.yml + input_mapping: + env: configuration + params: + PRODUCT_NAME: cf + ensure: + do: + - put: state + params: + file: generated-state/state.yml +``` + +### staged-director-config + +Downloads configuration for the BOSH director from Tanzu Operations Manager. + +**Task** + +<%= partial "../tasks/staged-director-config" %> + +**Implementation** + +<%= partial "../tasks/staged-director-config-script" %> + +**Usage** + +```yaml +- task: staged-director-config + image: platform-automation-image + file: platform-automation-tasks/tasks/staged-director-config.yml + input_mapping: + env: configuration + ensure: + do: + - put: state + params: + file: generated-state/state.yml +``` + +The configuration is exported to the `generated-config` output. +It does not extract credentials from Tanzu Operations Manager; +instead, it replaces them all with YAML interpolation `(())` placeholders. +This is to ensure that credentials are never written to disk. +The credentials need to be provided from an external configuration when invoking [configure-director](#configure-director). + +<%= partial "missing_fields_opsman_director" %> + +### test + +An example task to ensure the assets and Docker image are set up correctly in your Concourse pipeline. 
+ +**Task** + +<%= partial "../tasks/test" %> + +**Implementation** + +<%= partial "../tasks/test-script" %> + +**Usage** + +```yaml +- task: test + file: platform-automation-tasks/tasks/test.yml + image: platform-automation-image +``` + +### test-interpolate + +An example task to ensure that all required vars are present when interpolating into a base file. +For more instruction on this topic, see the [variables](concepts/variables.html) section. + +**Task** + +<%= partial "../tasks/test-interpolate" %> + +**Implementation** + +<%= partial "../tasks/test-interpolate-script" %> + +**Usage** + +```yaml +- task: test-interpolate + image: platform-automation-image + file: platform-automation-tasks/tasks/test-interpolate.yml + params: + CONFIG_FILE: foundations/((foundation))/config/download-tas.yml + SKIP_MISSING: true + input_mapping: + config: configuration +``` + +### update-runtime-config + +This is an _advanced task_. +Updates a runtime config on the Tanzu Operations Manager deployed BOSH director. +The task will interact with the BOSH director (sometimes via SSH tunnel through the Tanzu Operations Manager), +upload BOSH releases, +and set a named runtime config. +This is useful when installing agents on BOSH deployed VMs that don't have a Tanzu Operations Manager tile. + +**Task** + +<%= partial "../tasks/update-runtime-config" %> + +**Implementation** + +<%= partial "../tasks/update-runtime-config-script" %> + +**Usage** + +```yaml +- task: update-runtime-config + image: platform-automation-image + file: platform-automation-tasks/tasks/update-runtime-config.yml + input_mapping: + config: configuration + env: configuration + releases: bosh-releases + params: + CONFIG_FILE: runtime-config.yml + NAME: my-runtime-config + OPSMAN_SSH_PRIVATE_KEY: ((opsman-ssh-private-key)) +``` + +

+When using runtime configs, Tanzu Operations Manager owns the default runtime config. +If you use this task to edit "default" it will be replaced on every Apply Changes. +Use the NAME param to provide a non-conflicting runtime config.

+ +### upgrade-opsman + +Upgrades an existing Tanzu Operations Manager to a new given Tanzu Operations Manager version. + +**Task** + +<%= partial "../tasks/upgrade-opsman" %> + +**Implementation** + +<%= partial "../tasks/upgrade-opsman-script" %> + +**Usage** + +```yaml +- task: upgrade-opsman + image: platform-automation-image + file: platform-automation-tasks/tasks/upgrade-opsman.yml + input_mapping: + image: opsman-image + config: configuration + env: configuration + vars: configuration + params: + ENV_FILE: foundations/config/env.yml + OPSMAN_CONFIG_FILE: foundations/((foundation))/config/opsman.yml + STATE_FILE: state-((foundation)).yml + INSTALLATION_FILE: ((foundation))-installation*.zip + VARS_FILES: vars/foundations/((foundation))/vars/director.yml + ensure: *put-state +``` + +For more information about this task and how it works, see [Recovering and upgrading Tanzu Operations Manager](concepts/upgrade.html). + +### upload-and-stage-product + +Uploads and stages a product to the Tanzu Operations Manager specified in the config file. + +**Task** + +<%= partial "../tasks/upload-and-stage-product" %> + +**Implementation** + +<%= partial "../tasks/upload-and-stage-product-script" %> + +**Usage** + +```yaml +- task: upload-and-stage-pks + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-and-stage-product.yml + input_mapping: + product: pks-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### upload-product + +Uploads a product to the Tanzu Operations Manager specified in the config file. + +If a shasum is provided in the config.yml, +the integrity of the product will be verified +with that shasum before uploading. 
+ +**Task** + +<%= partial "../tasks/upload-product" %> + +**Implementation** + +<%= partial "../tasks/upload-product-script" %> + +**Usage** + +```yaml +- task: upload-tas-product + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-product.yml + input_mapping: + product: tas-product + env: configuration + params: + ENV_FILE: foundations/config/env.yml +``` + +### upload-stemcell + +Uploads a stemcell to Tanzu Operations Manager. + +Note that the filename of the stemcell must be exactly as downloaded from the Broadcom Support portal. +Tanzu Operations Manager parses this filename to determine the version and OS of the stemcell. + +**Task** + +<%= partial "../tasks/upload-stemcell" %> + +**Implementation** + +<%= partial "../tasks/upload-stemcell-script" %> + +**Usage** + +```yaml +- task: upload-pks-stemcell + image: platform-automation-image + file: platform-automation-tasks/tasks/upload-stemcell.yml + input_mapping: + env: configuration + stemcell: pks-stemcell + params: + ENV_FILE: foundations/config/env.yml +``` + +[//]: # ({% include ".internal_link_url.md" %}) +[//]: # ({% include ".external_link_url.md" %}) diff --git a/docs/tasks.md b/docs/tasks.md deleted file mode 100644 index 499153c1..00000000 --- a/docs/tasks.md +++ /dev/null @@ -1,960 +0,0 @@ -## Platform Automation Toolkit Tasks -This document lists each Platform Automation Toolkit task, -and provides information about their intentions, inputs, and outputs. - -The tasks are presented, in their entirety, -as they are found in the product. - -The docker image can be used to invoke the commands in each task locally. -Use `--help` for more information. -To learn more see the [running-commands-locally][running-commands-locally] section. - -### activate-certificate-authority - -Ensures that the newest certificate authority on Ops Manager is active. 
- -=== "Task" - ---excerpt--- "tasks/activate-certificate-authority" -=== "Implementation" - ---excerpt--- "tasks/activate-certificate-authority-script" -=== "Usage" - ---excerpt--- "reference/activate-certificate-authority-usage" - -### apply-changes - -Triggers an install on the Ops Manager described by the auth file. - -To optionally provide an errand file to manually control errands -for a particular of run of `apply-changes`. -To see an example of this config file, see the [Inputs and Outputs][errand-config] section. - -{% include '.disable-verifiers.md' %} - -=== "Task" - ---excerpt--- "tasks/apply-changes" -=== "Implementation" - ---excerpt--- "tasks/apply-changes-script" -=== "Usage" - ---excerpt--- "reference/apply-changes-usage" - -### apply-director-changes -`apply-changes` can also be used to trigger an install for just the BOSH Director -with the `--skip-deploy-products`/`-sdp` flag. - -{% include '.disable-verifiers.md' %} - -=== "Task" - ---excerpt--- "tasks/apply-director-changes" -=== "Implementation" - ---excerpt--- "tasks/apply-director-changes-script" -=== "Usage" - ---excerpt--- "reference/apply-director-changes-usage" - -### assign-multi-stemcell -`assign-multi-stemcell` assigns multiple stemcells to a provided product. -This feature is only available in OpsMan 2.6+. -For more information on how to utilize this workflow, -check out the [Stemcell Handling][stemcell-handling] topic. - -=== "Task" - ---excerpt--- "tasks/assign-multi-stemcell" -=== "Implementation" - ---excerpt--- "tasks/assign-multi-stemcell-script" -=== "Usage" - ---excerpt--- "examples/assign-multi-stemcell-usage" - -### assign-stemcell -`assign-stemcell` assigns a stemcell to a provided product. -For more information on how to utilize -this workflow, check out the [Stemcell Handling][stemcell-handling] topic. 
- -=== "Task" - ---excerpt--- "tasks/assign-stemcell" -=== "Implementation" - ---excerpt--- "tasks/assign-stemcell-script" -=== "Usage" - ---excerpt--- "examples/assign-stemcell-usage" - -### backup-director - -Use BBR to backup a BOSH director deployed with Ops Manager. - -=== "Task" - ---excerpt--- "tasks/backup-director" -=== "Implementation" - ---excerpt--- "tasks/backup-director-script" -=== "Usage" - ---excerpt--- "examples/backup-director-usage" - -### backup-product - -Use BBR to backup a product deployed with Ops Manager. - -=== "Task" - ---excerpt--- "tasks/backup-product" -=== "Implementation" - ---excerpt--- "tasks/backup-product-script" -=== "Usage" - ---excerpt--- "examples/backup-product-usage" - -### backup-tkgi - -Use BBR to backup Tanzu Kubernetes Grid Integrated Edition(TKGI) -deployed with Ops Manager. - -!!! info "PKS CLI may be Temporarily Unavailable" - During the backup, the PKS CLI is disabled. - Due to the nature of the backup, some commands may not work as expected. - -??? warning "Known Issue" - - When using the task [`backup-tkgi`][backup-tkgi] behind a proxy - the values for `no_proxy` can affect the ssh (though jumpbox) tunneling. - When the task invokes the `bbr` CLI, an environment variable (`BOSH_ALL_PROXY`) has been set, - this environment variable tries to honor the `no_proxy` settings. - The task's usage of the ssh tunnel requires the `no_proxy` to not be set. - - If you experience an error, such as an SSH connection refused or connection timeout, - try setting the `no_proxy: ""` as `params` on the task. 
- - For example, - - ```yaml - - task: backup-tkgi - file: platform-automation-tasks/tasks/backup-tkgi.yml - params: - no_proxy: "" - ``` - -=== "Task" - ---excerpt--- "tasks/backup-tkgi" -=== "Implementation" - ---excerpt--- "tasks/backup-tkgi-script" -=== "Usage" - ---excerpt--- "examples/backup-tkgi-usage" - -### check-pending-changes -Returns a table of the current state of your Ops Manager -and lists whether each product is changed or unchanged and the errands for that product. -By default, `ALLOW_PENDING_CHANGES: false` will force the task to fail. -This is useful to keep manual changes from being accidentally applied -when automating the [configure-product][configure-product]/[apply-changes][apply-changes] of other products. - -=== "Task" - ---excerpt--- "tasks/check-pending-changes" -=== "Implementation" - ---excerpt--- "tasks/check-pending-changes-script" -=== "Usage" - ---excerpt--- "reference/check-pending-changes-usage" - -### collect-telemetry -Collects foundation information -using the [Telemetry Collector][telemetry-docs] tool. - -This task requires the `telemetry-collector-binary` as an input. -The binary is available on [Tanzu Network][telemetry]; -you will need to define a `resource` to supply the binary. - -This task requires a [config file][telemetry-config]. - -After using this task, -the [send-telemetry][send-telemetry] -may be used to send telemetry data to VMware. - -=== "Task" - ---excerpt--- "tasks/collect-telemetry" -=== "Implementation" - ---excerpt--- "tasks/collect-telemetry-script" -=== "Usage" - ---excerpt--- "reference/collect-telemetry-usage" - -### configure-authentication -Configures Ops Manager with an internal userstore and admin user account. -See [configure-saml-authentication](#configure-saml-authentication) to configure an external SAML user store, -and [configure-ldap-authentication](#configure-ldap-authentication) to configure with LDAP. 
- -=== "Task" - ---excerpt--- "tasks/configure-authentication" -=== "Implementation" - ---excerpt--- "tasks/configure-authentication-script" -=== "Usage" - ---excerpt--- "reference/configure-authentication-usage" - -For details on the config file expected in the `config` input, -please see [Generating an Auth File][generating-an-auth-file]. - -### configure-director -Configures the BOSH Director with settings from a config file. -See [staged-director-config](#staged-director-config), -which can extract a config file. - -=== "Task" - ---excerpt--- "tasks/configure-director" -=== "Implementation" - ---excerpt--- "tasks/configure-director-script" -=== "Usage" - ---excerpt--- "reference/configure-director-usage" - -!!! warning "GCP with service account" - For GCP, if service account is used, the property associated_service_account has to be set explicitly in the `iaas_configuration` section. - -### configure-ldap-authentication -Configures Ops Manager with an external LDAP user store and admin user account. -See [configure-authentication](#configure-authentication) to configure an internal user store, -and [configure-saml-authentication](#configure-saml-authentication) to configure with SAML. - -=== "Task" - ---excerpt--- "tasks/configure-ldap-authentication" -=== "Implementation" - ---excerpt--- "tasks/configure-ldap-authentication-script" -=== "Usage" - ---excerpt--- "examples/configure-ldap-authentication-usage" - -For more details on using LDAP, -please refer to the [Ops Manager documentation][config-ldap]. - -For details on the config file expected in the `config` input, -please see [Generating an Auth File][generating-an-auth-file]. - -### configure-opsman -This task supports configuring settings -on the Ops Manager Settings page in the UI. -For example, the SSL cert for the Ops Manager VM can be configured. - -Configuration can be added directly to [`opsman.yml`][inputs-outputs-configure-opsman]. 
-An example of all configurable properties can be found in the "Additional Settings" tab. - -The [`upgrade-opsman`][upgrade-opsman] task will automatically call `configure-opsman`, -so there is no need to use this task then. -It is recommended to use this task in the initial setup of the Ops Manager VM. - -=== "Task" - ---excerpt--- "tasks/configure-opsman" -=== "Implementation" - ---excerpt--- "tasks/configure-opsman-script" -=== "Usage" - ---excerpt--- "reference/configure-opsman-usage" - -### configure-product -Configures an individual, staged product with settings from a config file. - -Not to be confused with Ops Manager's -built-in [import][bbr-import], -which reads all deployed products and configurations from a single opaque file, -intended for import as part of backup/restore and upgrade lifecycle processes. - -See [staged-config](#staged-config), -which can extract a config file, -and [upload-and-stage-product](#upload-and-stage-product), -which can stage a product that's been uploaded. - -=== "Task" - ---excerpt--- "tasks/configure-product" -=== "Implementation" - ---excerpt--- "tasks/configure-product-script" -=== "Usage" - ---excerpt--- "reference/configure-product-usage" - -### configure-new-certificate-authority - -Create a new certificate authority on Ops Manager. This can either create a -new CA using Credhub or create a new CA using a provided certificate and -private key in PEM format via the `certs/` input. - -=== "Task" - ---excerpt--- "tasks/configure-new-certificate-authority" -=== "Implementation" - ---excerpt--- "tasks/configure-new-certificate-authority-script" -=== "Usage" - ---excerpt--- "reference/configure-new-certificate-authority-usage" - -### configure-saml-authentication -Configures Ops Manager with an external SAML user store and admin user account. 
-See [configure-authentication](#configure-authentication) to configure an internal user store, -and [configure-ldap-authentication](#configure-ldap-authentication) to configure with LDAP. - -=== "Task" - ---excerpt--- "tasks/configure-saml-authentication" -=== "Implementation" - ---excerpt--- "tasks/configure-saml-authentication-script" -=== "Usage" - ---excerpt--- "examples/configure-saml-authentication-usage" - -!!! info "Bosh Admin Client" - By default, this task creates a bosh admin client. - This is helpful for some advanced workflows - that involve communicating directly with the BOSH Director. - It is possible to disable this behavior; - see our [config file documentation][generating-an-auth-file] for details. - -Configuring SAML has two different auth flows for the UI and the task. -The UI will have a browser based login flow. -The CLI will require `client-id` and `client-secret` as it cannot do a browser login flow. - -For more details on using SAML, -please refer to the [Ops Manager documentation][config-saml] - -For details on the config file expected in the `config` input, -please see [Generating an Auth File][generating-an-auth-file]. - -### create-vm -Creates an unconfigured Ops Manager VM. - -=== "Task" - ---excerpt--- "tasks/create-vm" -=== "Implementation" - ---excerpt--- "tasks/create-vm-script" -=== "Usage" - ---excerpt--- "reference/create-vm-usage" - -This task requires a config file specific to the IaaS being deployed to. -Please see the [configuration][opsman-config] page for more specific examples. - -The task does specific CLI commands for the creation of the Ops Manager VM on each IAAS. See below for more information: - -**AWS** - -1. Requires the image YAML file from Tanzu Network -2. Validates the existence of the VM if defined in the statefile, if so do nothing -3. Chooses the correct ami to use based on the provided image YAML file from Tanzu Network -4. Creates the VM configured via opsman config and the image YAML. 
This only attaches existing infrastructure to a newly created VM. This does not create any new resources -5. The public IP address, if provided, is assigned after successful creation - -**Azure** - -1. Requires the image YAML file from Tanzu Network -1. Validates the existence of the VM if defined in the statefile, if so do nothing -1. Copies the image (of the OpsMan VM from the specified region) as a blob into the specified storage account -1. Creates the Ops Manager image -1. Creates a VM from the image. This will use unmanaged disk (if specified), and assign a public and/or private IP. This only attaches existing infrastructure to a newly created VM. This does not create any new resources. - -**GCP** - -1. Requires the image YAML file from Tanzu Network -1. Validates the existence of the VM if defined in the statefile, if so do nothing -1. Creates a compute image based on the region specific Ops Manager source URI in the specified Ops Manager account -1. Creates a VM from the image. This will assign a public and/or private IP address, VM sizing, and tags. This does not create any new resources. - -**Openstack** - -1. Requires the image YAML file from Tanzu Network -1. Validates the existence of the VM if defined in the statefile, if so do nothing -1. Recreates the image in openstack if it already exists to validate we are using the correct version of the image -1. Creates a VM from the image. This does not create any new resources -1. The public IP address, if provided, is assigned after successful creation - -**Vsphere** - -1. Requires the OVA image from Tanzu Network -1. Validates the existence of the VM if defined in the statefile, if so do nothing -1. Build ipath from the provided datacenter, folder, and vmname provided in the config file. The created VM is stored on the generated path. If folder is not provided, the VM will be placed in the datacenter. -1. Creates a VM from the image provided to the `create-vm` command. 
This does not create any new resources - - -### credhub-interpolate -Interpolate credhub entries into configuration files - -!!! warning "Deprecation Notice" - This task will be deprecated in future _major_ versions of Platform Automation Toolkit. - -!!! info "prepare-tasks-with-secrets" - The [prepare-tasks-with-secrets] task replaces the credhub-interpolate task on Concourse versions 5.x+ - and provides additional benefits. - -=== "Task" - ---excerpt--- "tasks/credhub-interpolate" -=== "Implementation" - ---excerpt--- "tasks/credhub-interpolate-script" -=== "Usage" - ---excerpt--- "examples/credhub-interpolate-usage" - -This task requires a valid credhub with UAA client and secret. For information on how to -set this up, see [Secrets Handling][secrets-handling] - -### delete-certificate-authority - -Delete all inactive certificate authorities from the Ops Manager. - -=== "Task" - ---excerpt--- "tasks/delete-certificate-authority" -=== "Implementation" - ---excerpt--- "tasks/delete-certificate-authority-script" -=== "Usage" - ---excerpt--- "reference/delete-certificate-authority-usage" - -### delete-installation -Delete the Ops Manager Installation - -=== "Task" - ---excerpt--- "tasks/delete-installation" -=== "Implementation" - ---excerpt--- "tasks/delete-installation-script" -=== "Usage" - ---excerpt--- "reference/delete-installation-usage" - -### delete-vm -Deletes the Ops Manager VM instantiated by [create-vm](#create-vm). - -=== "Task" - ---excerpt--- "tasks/delete-vm" -=== "Implementation" - ---excerpt--- "tasks/delete-vm-script" -=== "Usage" - ---excerpt--- "reference/delete-vm-usage" - -This task requires the [state file][state] generated [create-vm](#create-vm). - -The task does specific CLI commands for the deletion of the Ops Manager VM and resources on each IAAS. See below for more information: - -**AWS** - -1. Deletes the VM - -**Azure** - -1. Deletes the VM -1. Attempts to delete the associated disk -1. Attempts to delete the associated nic -1. 
Attempts to delete the associated image - -**GCP** - -1. Deletes the VM -1. Attempts to delete the associated image - -**Openstack** - -1. Deletes the VM -1. Attempts to delete the associated image - -**vSphere** - -1. Deletes the VM - -### download-and-upload-product -This is an _advanced task_. -If a product (and its associated stemcell) has already been uploaded to Ops Manager -then it will not re-download and upload. -This is helpful when looking for a fast-feedback loop for building pipelines. - -This task is similar to [`download-product`][download-product], -as it takes the same product config. -There are no `outputs` for this task -because the products (and stemcell) don't need to be shared downstream. - -!!! warning - This currently only works with product source being Tanzu Network (Pivotal Network). - -=== "Task" - ---excerpt--- "tasks/download-and-upload-product" -=== "Implementation" - ---excerpt--- "tasks/download-and-upload-product-script" -=== "Usage" - ---excerpt--- "examples/download-and-upload-product-usage" - -### download-product - -{% include "./.opsman_filename_change_note.md" %} - -Downloads a product specified in a config file from Tanzu Network(`pivnet`), S3(`s3`), GCS(`gcs`), or Azure(`azure`). -Optionally, also downloads the latest stemcell for that product. - -Downloads are cached, so files are not re-downloaded each time. -When downloading from Tanzu Network, -the cached file is verified -using the Tanzu Network checksum -to validate the integrity of that file. -If it does not, the file is re-downloaded. -When downloading from a supported blobstore -the cached file is not-verified, -as there is no checksum from those blobstore APIs to use. - -Outputs can be persisted to any supported blobstore using a `put` to an appropriate resource -for later use with download-product using the `SOURCE` parameter, -or used directly as inputs to [upload-and-stage-product](#upload-and-stage-product) -and [upload-stemcell](#upload-stemcell) tasks. 
- -This task requires a [download-product config file][download-product-config]. - -If stemcell-iaas is specified in the [download-product config file][download-product-config], -and the specified product is a `.pivotal` file, -`download-product` will attempt to download the stemcell for the product. -It will retrieve the latest compatible stemcell for the specified IaaS. -The valid IaaSs are: - -- `aws` -- `azure` -- `google` -- `openstack` -- `vsphere` - -If a configuration for S3, GCS, or Azure is present in the [download-product config file][download-product-config], -the slug and version of the downloaded product file will be prepended in brackets to the filename. -For example: - -- original-pivnet-filenames: - ``` - ops-manager-aws-2.5.0-build.123.yml - cf-2.5.0-build.45.pivotal - ``` - -- download-product-filenames if blobstore configuration is present: - ``` - [ops-manager,2.5.0]ops-manager-aws-2.5.0-build.123.yml - [elastic-runtime,2.5.0]cf-2.5.0-build.45.pivotal - ``` - -This is to allow the same config parameters -that let us select a file from Tanzu Network -select it again when pulling from the supported blobstore. -Note that the filename will be unchanged -if supported blobstore keys are not present in the configuration file. -This avoids breaking current pipelines. - -!!! warning "When using the s3 resource in concourse" - If you are using a `regexp` in your s3 resource definition - that explicitly requires the Tanzu Network filename - to be the _start_ of the regex, (i.e., the pattern starts with `^`) - this won't work when using S3 config. - The new file format preserves the original filename, - so it is still possible to match on that - - but if you need to match from the beginning of the filename, - that will have been replaced by the prefix described above. - -!!! info "When specifying Tanzu Application Service-Windows" - This task will automatically download and inject the winfs for pas-windows. - -!!! 
warning "When specifying Tanzu Application Service-Windows on Vsphere" - This task cannot download the stemcell for pas-windows on vSphere. - To build this stemcell manually, please reference the - [Creating a vSphere Windows Stemcell][create-vsphere-windows-stemcell] guide - in VMware Documentation. - -!!! info "When only downloading from Tanzu Network" - When the download product config only has Tanzu Network credentials, - it will not add the prefix to the downloaded product. - For example, `example-product.pivotal` from Tanzu Network will be output - as `example-product.pivotal`. - -=== "Task" - ---excerpt--- "tasks/download-product" -=== "Implementation" - ---excerpt--- "tasks/download-product-script" -=== "Tanzu Network Usage" - ---excerpt--- "reference/download-product-usage" -=== "S3 Usage" - ---excerpt--- "reference/download-product-usage-s3" -=== "GCS Usage" - ---excerpt--- "examples/download-product-usage-gcs" -=== "Azure Usage" - ---excerpt--- "examples/download-product-usage-azure" - -### expiring-certificates -Returns a list of certificates that are expiring within a time frame. -These certificates can be Ops Manager or Credhub certificates. -Root CAs cannot be included in this list until Ops Manager 2.7. -This is purely an informational task. - -=== "Task" - ---excerpt--- "tasks/expiring-certificates" -=== "Implementation" - ---excerpt--- "tasks/expiring-certificates-script" -=== "Usage" - ---excerpt--- "reference/expiring-certificates-usage" - -### export-installation -Exports an existing Ops Manager to a file. - -This is the first part of the backup/restore and upgrade lifecycle processes. -This task is used on a fully installed and healthy Ops Manager to export -settings to an upgraded version of Ops Manager. - -To use with non-versioned blobstore, you can override `INSTALLATION_FILE` param -to include `$timestamp`, then the generated installation file will include a sortable -timestamp in the filename. 
- -example: -```yaml -params: - INSTALLATION_FILE: installation-$timestamp.zip -``` - -!!! info - The timestamp is generated using the time on concourse worker. - If the time is different on different workers, the generated timestamp may fail to sort correctly. - Changing the time or timezone on workers might interfere with ordering. - -=== "Task" - ---excerpt--- "tasks/export-installation" -=== "Implementation" - ---excerpt--- "tasks/export-installation-script" -=== "Usage" - ---excerpt--- "reference/export-installation-usage" - -{% include "./.export_installation_note.md" %} - -### generate-certificate - -Generate a certificate, signed by the active Ops Manager certificate authority, -for the domains specified in the `DOMAINS` environment variable. - -This task outputs `certificate`, containing `certificate.pem` and -`privatekey.pem` for the new certificate. - -=== "Task" - ---excerpt--- "tasks/generate-certificate" -=== "Implementation" - ---excerpt--- "tasks/generate-certificate-script" - - -### import-installation -Imports a previously exported installation to Ops Manager. - -This is a part of the backup/restore and upgrade lifecycle processes. -This task is used after an installation has been exported and a new Ops Manager -has been deployed, but before the new Ops Manager is configured. - -=== "Task" - ---excerpt--- "tasks/import-installation" -=== "Implementation" - ---excerpt--- "tasks/import-installation-script" -=== "Usage" - ---excerpt--- "examples/import-installation-usage" - -### make-git-commit -Copies a single file into a repo and makes a commit. -Useful for persisting the state output of tasks that manage the VM, such as: - -- [create-vm](#create-vm) -- [upgrade-opsman](#upgrade-opsman) -- [delete-vm](#delete-vm) - -Also useful for persisting the configuration output from: - -- [staged-config](#staged-config) -- [staged-director-config](#staged-director-config) - -!!! 
info - This commits **all changes** present - in the repo used for the `repository` input, - in addition to copying in a single file. - -!!! info - This does not perform a `git push`! - You will need to `put` the output of this task to a git resource to persist it. - -=== "Task" - ---excerpt--- "tasks/make-git-commit" -=== "Implementation" - ---excerpt--- "tasks/make-git-commit-script" -=== "Usage" - ---excerpt--- "examples/make-git-commit-usage" - -### pre-deploy-check -Checks if the Ops Manager director is configured properly and validates the configuration. -This feature is only available in Ops Manager 2.6+. -Additionally, checks each of the staged products -and validates they are configured correctly. -This task can be run at any time -and can be used as a pre-check for [`apply-changes`][apply-changes]. - -The checks that this task executes are: - -- is configuration complete and valid -- is the network assigned -- is the availability zone assigned -- is the stemcell assigned -- what stemcell type/version is required -- are there any unset/invalid properties -- did any ops manager verifiers fail - -If any of the above checks fail -the task will fail. -The failed task will provide a list of errors that need to be fixed -before an `apply-changes` could start. - -=== "Task" - ---excerpt--- "tasks/pre-deploy-check" -=== "Implementation" - ---excerpt--- "tasks/pre-deploy-check-script" -=== "Usage" - ---excerpt--- "reference/pre-deploy-check-usage" - -### prepare-image -This task modifies the container image with runtime dependencies. -`CA_CERTS` can be added, -which can help secure HTTP connections with a proxy server -and allows the use of a custom CA on the Ops Manager. - -!!! warn "Concourse 5+ Only" - This task uses a Concourse feature - that allows inputs and outputs to have the same name. - This feature is only available in Concourse 5+. - `prepare-image` does not work with Concourse 4. 
- -=== "Task" - ---excerpt--- "tasks/prepare-image" -=== "Implementation" - ---excerpt--- "tasks/prepare-image-script" -=== "Usage" - ---excerpt--- "reference/prepare-image-usage" - -### prepare-tasks-with-secrets -Modifies task files to include variables needed for config files as environment variables -for run-time interpolation from a secret store. -Learn more about [secrets handling][secrets-handling]. - -!!! warn "Concourse 5+ Only" - This task uses a Concourse feature - that allows inputs and outputs to have the same name. - This feature is only available in Concourse 5+. - `prepare-tasks-with-secrets` does not work with Concourse 4. - -=== "Task" - ---excerpt--- "tasks/prepare-tasks-with-secrets" -=== "Implementation" - ---excerpt--- "tasks/prepare-tasks-with-secrets-script" -=== "Usage" - ---excerpt--- "reference/prepare-tasks-with-secrets-usage" - -### regenerate-certificates - -Regenerates all non-configurable leaf certificates managed by Ops Manager using -the active certificate authority. - -=== "Task" - ---excerpt--- "tasks/regenerate-certificates" -=== "Implementation" - ---excerpt--- "tasks/regenerate-certificates-script" -=== "Usage" - ---excerpt--- "reference/regenerate-certificates-usage" - -### replicate-product -Will replicate the product for use in isolation segments. -The task requires a downloaded product prior to replication. -The output is a replicated tile with a new name in the metadata and filename. - -!!! info "Using replicate-product" - This command does not support storing the replicated product - in a non-versioned blobstore, because it cannot generate a unique name. - It is recommended to use the replicated tile immediately in the next task - rather than storing it and using it in a different job. 
- -=== "Task" - ---excerpt--- "tasks/replicate-product" -=== "Implementation" - ---excerpt--- "tasks/replicate-product-script" -=== "Usage" - ---excerpt--- "examples/replicate-product-usage" - -### revert-staged-changes -Reverts all changes that are currently staged on the Ops Manager. -This is only available _for_ Ops Manager 2.5.21+, 2.6.13+, or 2.7.2+ - -!!! warning "Using revert-staged-changes" - Since this reverts all changes on an Ops Manager, - it can conflict with tasks that perform stage or configure operations. - Use passed constraints to ensure things run in the order you mean them to. - -=== "Task" - ---excerpt--- "tasks/revert-staged-changes" -=== "Implementation" - ---excerpt--- "tasks/revert-staged-changes-script" -=== "Usage" - ---excerpt--- "reference/revert-staged-changes-usage" - -### run-bosh-errand -Runs a specified BOSH errand directly on the BOSH Director -by tunneling through Ops Manager. - -!!! warning "Interacting with the BOSH Director" - Ops Manager is the main interface for interacting with BOSH, - and it has no way of knowing what is happening to the BOSH Director - outside of the Ops Manager UI context. - By using this task, you are accepting the risk - that what you are doing cannot be tracked by your Ops Manager. - -!!! warning "Retrying Errands" - Ops Manager, by design, will re-run failed errands for you. - As this task interacts with BOSH directly, - your errand will not be re-run if it fails. - To replicate this retry behavior in your pipeline, - you can take advantage of the [`attempts`][concourse-attempts] feature - of Concourse to run the task more than once. - -=== "Task" - ---excerpt--- "tasks/run-bosh-errand" -=== "Implementation" - ---excerpt--- "tasks/run-bosh-errand-script" -=== "Usage" - ---excerpt--- "reference/run-bosh-errand-usage" - -### send-telemetry -Sends the `.tar` output from [`collect-telemetry`][collect-telemetry] -to VMware. - -!!! 
info Telemetry Key - In order to use this task, - you will need to acquire a license key. - Contact your VMware Representative. - -=== "Task" - ---excerpt--- "tasks/send-telemetry" -=== "Implementation" - ---excerpt--- "tasks/send-telemetry-script" -=== "Usage" - ---excerpt--- "reference/send-telemetry-usage" - -### stage-configure-apply -This is an _advanced task_. -Stage a product to Ops Manager, configure that product, and apply changes -only to that product without applying changes to the rest of the foundation. - -{% include '.disable-verifiers.md' %} - -=== "Task" - ---excerpt--- "tasks/stage-configure-apply" -=== "Implementation" - ---excerpt--- "tasks/stage-configure-apply-script" -=== "Usage" - ---excerpt--- "reference/stage-configure-apply-usage" - -### stage-product -Stages a product to the Ops Manager specified in the config file. - -=== "Task" - ---excerpt--- "tasks/stage-product" -=== "Implementation" - ---excerpt--- "tasks/stage-product-script" -=== "Usage" - ---excerpt--- "reference/stage-product-usage" - -### staged-config -Downloads the configuration for a product from Ops Manager. - -Not to be confused with Ops Manager's -built-in [export][bbr-export], -which puts all deployed products and configurations into a single file, -intended for import as part of backup/restore and upgrade lifecycle processes. - -=== "Task" - ---excerpt--- "tasks/staged-config" -=== "Implementation" - ---excerpt--- "tasks/staged-config-script" -=== "Usage" - ---excerpt--- "examples/staged-config-usage" - -### staged-director-config - -{% include "./.opsman_filename_change_note.md" %} - -Downloads configuration for the BOSH director from Ops Manager. - -=== "Task" - ---excerpt--- "tasks/staged-director-config" -=== "Implementation" - ---excerpt--- "tasks/staged-director-config-script" -=== "Usage" - ---excerpt--- "examples/staged-director-config-usage" - -The configuration is exported to the `generated-config` output. 
-It does not extract credentials from Ops Manager -and replaced them all with YAML interpolation `(())` placeholders. -This is to ensure that credentials are never written to disk. -The credentials need to be provided from an external configuration when invoking [configure-director](#configure-director). - -{% include ".missing_fields_opsman_director.md" %} - -### test -An example task to ensure the assets and docker image are setup correctly in your concourse pipeline. - -=== "Task" - ---excerpt--- "tasks/test" -=== "Implementation" - ---excerpt--- "tasks/test-script" -=== "Usage" - ---excerpt--- "reference/test-usage" - -### test-interpolate -An example task to ensure that all required vars are present when interpolating into a base file. -For more instruction on this topic, see the [variables](concepts/variables.md) section - -=== "Task" - ---excerpt--- "tasks/test-interpolate" -=== "Implementation" - ---excerpt--- "tasks/test-interpolate-script" -=== "Usage" - ---excerpt--- "reference/test-interpolate-usage" - -### update-runtime-config -This is an _advanced task_. -Updates a runtime config on the Ops Manager deployed BOSH director. -The task will interact with the BOSH director (sometimes via SSH tunnel through the Ops Manager), -upload BOSH releases, -and set a named runtime config. -This is useful when installing agents on BOSH deployed VMs that don't have a Ops Manager tile. - -=== "Task" - ---excerpt--- "tasks/update-runtime-config" -=== "Implementation" - ---excerpt--- "tasks/update-runtime-config-script" -=== "Usage" - ---excerpt--- "examples/update-runtime-config-usage" - -!!! warn - When using runtime configs, Ops Manager _owns_ the default runtime config. - If you use this task to edit "default" it will be replaced on every Apply Changes. - Please use `NAME` param to provide a non-conflicting runtime config. 
- -### upgrade-opsman -Upgrades an existing Ops Manager to a new given Ops Manager version - -=== "Task" - ---excerpt--- "tasks/upgrade-opsman" -=== "Implementation" - ---excerpt--- "tasks/upgrade-opsman-script" -=== "Usage" - ---excerpt--- "reference/upgrade-opsman-usage" - -For more information about this task and how it works, see the [upgrade](concepts/upgrade.md) page. - -### upload-and-stage-product -Uploads and stages a product to the Ops Manager specified in the config file. - -=== "Task" - ---excerpt--- "tasks/upload-and-stage-product" -=== "Implementation" - ---excerpt--- "tasks/upload-and-stage-product-script" -=== "Usage" - ---excerpt--- "reference/upload-and-stage-product-usage" - -### upload-product -Uploads a product to the Ops Manager specified in the config file. - -If a shasum is provided in the config.yml, -the integrity of the product will be verified -with that shasum before uploading. - -=== "Task" - ---excerpt--- "tasks/upload-product" -=== "Implementation" - ---excerpt--- "tasks/upload-product-script" -=== "Usage" - ---excerpt--- "reference/upload-product-usage" - -### upload-stemcell -Uploads a stemcell to Ops Manager. - -Note that the filename of the stemcell must be exactly as downloaded from Tanzu Network. -Ops Manager parses this filename to determine the version and OS of the stemcell. 
- -=== "Task" - ---excerpt--- "tasks/upload-stemcell" -=== "Implementation" - ---excerpt--- "tasks/upload-stemcell-script" -=== "Usage" - ---excerpt--- "reference/upload-stemcell-usage" - -{% include ".internal_link_url.md" %} -{% include ".external_link_url.md" %} diff --git a/docs/toc.html.md.erb b/docs/toc.html.md.erb new file mode 100644 index 00000000..ccf4264b --- /dev/null +++ b/docs/toc.html.md.erb @@ -0,0 +1,30 @@ +# Platform Automation Toolkit for VMware Tanzu + +* [Overview](./index.html) +* [Release Notes](./release-notes.html) +* [Compatibility and versioning](./compatibility-and-versioning.html) +* [Getting started](./getting-started.html) +* [**Reference pipelines**](./TOC/reference-pipelines-index.html) + * [Retrieving external dependencies](./pipelines/resources.html) + * [Operations Manager & multiple products](./pipelines/multiple-products.html) +* [**How-to guides**](./TOC/how-to-index.html) + * [Writing a pipeline to install Operations Manager](./how-to-guides/installing-opsman.html) + * [Upgrading an existing Operations Manager](./how-to-guides/upgrade-existing-opsman.html) + * [Generating an Auth file](./how-to-guides/configuring-auth.html) + * [Generating an Env file](./how-to-guides/configuring-env.html) + * [Creating a director config file](./how-to-guides/creating-a-director-config-file.html) + * [Extending a pipeline to install a product](./how-to-guides/adding-a-product.html) + * [Why use Git and GitHub?](./how-to-guides/git-repo-layout.html) + * [Running commands locally](./how-to-guides/running-commands-locally.html) + * [Setting up S3 for file storage](./how-to-guides/setting-up-s3.html) + * [Writing a pipeline to rotate the foundation CA](./how-to-guides/rotating-certificate-authority.html) +* [**Concepts**](./TOC/concepts-index.html) + * [Using a secrets store to store credentials](./concepts/secrets-handling.html) + * [Handling stemcells](./concepts/stemcell-handling.html) + * [Variables](./concepts/variables.html) + * [Recovering 
and upgrading Operations Manager](./concepts/upgrade.html) +* [**Pipeline design**](./TOC/pipeline-design-index.html) + * [Configuration management strategies](./pipeline-design/configuration-management-strategies.html) +* [**Tasks**](./TOC/tasks-index.html) + * [Task reference](./tasks.html) + * [Task inputs and outputs](./inputs-outputs.html) diff --git a/tasks/_activate-certificate-authority-script.html.md.erb b/tasks/_activate-certificate-authority-script.html.md.erb new file mode 100644 index 00000000..5c2f342b --- /dev/null +++ b/tasks/_activate-certificate-authority-script.html.md.erb @@ -0,0 +1,8 @@ +
+
+cat /var/version && echo ""
+set -eux
+
+om --env env/"${ENV_FILE}" activate-certificate-authority
+
+
diff --git a/tasks/_activate-certificate-authority.html.md.erb b/tasks/_activate-certificate-authority.html.md.erb new file mode 100644 index 00000000..d2a2a605 --- /dev/null +++ b/tasks/_activate-certificate-authority.html.md.erb @@ -0,0 +1,20 @@ +
+
+---
+platform: linux
+
+inputs:
+- name: platform-automation-tasks
+- name: env # contains the env file with target OpsMan Information
+
+params:
+  ENV_FILE: env.yml
+  # - Required
+  # - Filepath of the env config YAML
+  # - The path is relative to root of the `env` input
+
+run:
+  path: platform-automation-tasks/tasks/activate-certificate-authority.sh
+
+
+
diff --git a/tasks/apply-changes.sh b/tasks/_apply-changes-script.html.md.erb old mode 100755 new mode 100644 similarity index 86% rename from tasks/apply-changes.sh rename to tasks/_apply-changes-script.html.md.erb index 5865bbf6..f3557bd8 --- a/tasks/apply-changes.sh +++ b/tasks/_apply-changes-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet apply-changes-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -29,4 +28,6 @@ fi
 # shellcheck disable=SC2068
 om --env env/"${ENV_FILE}" apply-changes \
   ${flags[@]}
-# code_snippet apply-changes-script end
+
+
+
diff --git a/tasks/apply-changes.yml b/tasks/_apply-changes.html.md.erb similarity index 69% rename from tasks/apply-changes.yml rename to tasks/_apply-changes.html.md.erb index 88610bea..aa5de0d0 100644 --- a/tasks/apply-changes.yml +++ b/tasks/_apply-changes.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet apply-changes start yaml +
+
 ---
 platform: linux
 
@@ -21,8 +17,8 @@ params:
 
   RECREATE: false
   # - Optional
-  # - If true, will recreate all product vms
-  # - If true, will also recreate the director vm if there are changes
+  # - If true, will recreate all product VMs
+  # - If true, will also recreate the director VM if there are changes
 
   ERRAND_CONFIG_FILE:
   # - Optional
@@ -44,4 +40,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/apply-changes.sh
-# code_snippet apply-changes end
+
+
+
diff --git a/tasks/apply-director-changes.sh b/tasks/_apply-director-changes-script.html.md.erb old mode 100755 new mode 100644 similarity index 71% rename from tasks/apply-director-changes.sh rename to tasks/_apply-director-changes-script.html.md.erb index e9bbe0b3..71151203 --- a/tasks/apply-director-changes.sh +++ b/tasks/_apply-director-changes-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet apply-director-changes-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -14,4 +13,6 @@ fi
 # shellcheck disable=SC2068
 om --env env/"${ENV_FILE}" apply-changes \
   ${flags[@]}
-# code_snippet apply-director-changes-script end
+
+
+
diff --git a/tasks/apply-director-changes.yml b/tasks/_apply-director-changes.html.md.erb similarity index 62% rename from tasks/apply-director-changes.yml rename to tasks/_apply-director-changes.html.md.erb index 5cae74c9..7043c7fd 100644 --- a/tasks/apply-director-changes.yml +++ b/tasks/_apply-director-changes.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet apply-director-changes start yaml +
+
 ---
 platform: linux
 
@@ -26,4 +22,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/apply-director-changes.sh
-# code_snippet apply-director-changes end
+
+
+
diff --git a/tasks/assign-multi-stemcell.sh b/tasks/_assign-multi-stemcell-script.html.md.erb old mode 100755 new mode 100644 similarity index 50% rename from tasks/assign-multi-stemcell.sh rename to tasks/_assign-multi-stemcell-script.html.md.erb index 282741ce..8fc997cf --- a/tasks/assign-multi-stemcell.sh +++ b/tasks/_assign-multi-stemcell-script.html.md.erb @@ -1,8 +1,10 @@ -#!/usr/bin/env bash -# code_snippet assign-multi-stemcell-script start bash +
+
 
 cat /var/version && echo ""
 set -eux
 om --env env/"${ENV_FILE}" assign-multi-stemcell \
   --config config/"${CONFIG_FILE}"
-# code_snippet assign-multi-stemcell-script end
+
+
+
diff --git a/tasks/assign-multi-stemcell.yml b/tasks/_assign-multi-stemcell.html.md.erb similarity index 64% rename from tasks/assign-multi-stemcell.yml rename to tasks/_assign-multi-stemcell.html.md.erb index 9b1e01fd..08f87f0a 100644 --- a/tasks/assign-multi-stemcell.yml +++ b/tasks/_assign-multi-stemcell.html.md.erb @@ -1,10 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet assign-multi-stemcell start yaml -# This feature is only available in OpsMan 2.6+. +
+
 ---
 platform: linux
 
@@ -32,4 +27,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/assign-multi-stemcell.sh
-# code_snippet assign-multi-stemcell end
+
+
+
diff --git a/tasks/assign-stemcell.sh b/tasks/_assign-stemcell-script.html.md.erb old mode 100755 new mode 100644 similarity index 51% rename from tasks/assign-stemcell.sh rename to tasks/_assign-stemcell-script.html.md.erb index ec14a1b4..5b2cb5a7 --- a/tasks/assign-stemcell.sh +++ b/tasks/_assign-stemcell-script.html.md.erb @@ -1,8 +1,9 @@ -#!/usr/bin/env bash -# code_snippet assign-stemcell-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 om --env env/"${ENV_FILE}" assign-stemcell \
   --config config/"${CONFIG_FILE}"
-# code_snippet assign-stemcell-script end
+
+
+
diff --git a/tasks/assign-stemcell.yml b/tasks/_assign-stemcell.html.md.erb similarity index 67% rename from tasks/assign-stemcell.yml rename to tasks/_assign-stemcell.html.md.erb index 755c69c7..e29bea33 100644 --- a/tasks/assign-stemcell.yml +++ b/tasks/_assign-stemcell.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet assign-stemcell start yaml +
+
 ---
 platform: linux
 
@@ -30,4 +26,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/assign-stemcell.sh
-# code_snippet assign-stemcell end
+
+
+
diff --git a/tasks/backup-director.sh b/tasks/_backup-director-script.html.md.erb old mode 100755 new mode 100644 similarity index 87% rename from tasks/backup-director.sh rename to tasks/_backup-director-script.html.md.erb index 149f2f83..7d80bed3 --- a/tasks/backup-director.sh +++ b/tasks/_backup-director-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet backup-director-script start bash - +
+
 cat /var/version && echo ""
 set -eu
 
@@ -27,4 +26,5 @@ pushd backup
 
   tar -zcvf director_"$( date +"%Y-%m-%d-%H-%M-%S" )".tgz --remove-files -- */*
 popd
-# code_snippet backup-director-script end
+
+
diff --git a/tasks/backup-director.yml b/tasks/_backup-director.html.md.erb similarity index 73% rename from tasks/backup-director.yml rename to tasks/_backup-director.html.md.erb index 87b2876e..ca1fbfe0 100644 --- a/tasks/backup-director.yml +++ b/tasks/_backup-director.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet backup-director start yaml +
+
 ---
 platform: linux
 
@@ -38,4 +34,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/backup-director.sh
-# code_snippet backup-director end
+
+
+
diff --git a/tasks/backup-product.sh b/tasks/_backup-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 87% rename from tasks/backup-product.sh rename to tasks/_backup-product-script.html.md.erb index dc53b690..90a9396c --- a/tasks/backup-product.sh +++ b/tasks/_backup-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet backup-product-script start bash - +
+
 cat /var/version && echo ""
 set -eu
 
@@ -29,4 +28,5 @@ pushd backup
 
   tar -zcvf product_"${PRODUCT_NAME}"_"$( date +"%Y-%m-%d-%H-%M-%S" )".tgz --remove-files -- */*
 popd
-# code_snippet backup-product-script end
+
+
diff --git a/tasks/backup-product.yml b/tasks/_backup-product.html.md.erb similarity index 74% rename from tasks/backup-product.yml rename to tasks/_backup-product.html.md.erb index ca33a0dc..a2935229 100644 --- a/tasks/backup-product.yml +++ b/tasks/_backup-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet backup-product start yaml +
+
 ---
 platform: linux
 
@@ -38,4 +34,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/backup-product.sh
-# code_snippet backup-product end
+
+
+
diff --git a/tasks/backup-tkgi.sh b/tasks/_backup-tkgi-script.html.md.erb old mode 100755 new mode 100644 similarity index 91% rename from tasks/backup-tkgi.sh rename to tasks/_backup-tkgi-script.html.md.erb index d9f50c80..fe4acd59 --- a/tasks/backup-tkgi.sh +++ b/tasks/_backup-tkgi-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet backup-tkgi-script start bash - +
+
 cat /var/version && echo ""
 set -eu
 
@@ -38,4 +37,5 @@ pushd backup
     --exclude "${PRODUCT_NAME}"_*.tgz \
     --remove-files -- */*
 popd
-# code_snippet backup-tkgi-script end
+
+
diff --git a/tasks/backup-tkgi.yml b/tasks/_backup-tkgi.html.md.erb similarity index 74% rename from tasks/backup-tkgi.yml rename to tasks/_backup-tkgi.html.md.erb index 85f8d53d..757294ae 100644 --- a/tasks/backup-tkgi.yml +++ b/tasks/_backup-tkgi.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet backup-tkgi start yaml +
+
 ---
 platform: linux
 
@@ -35,4 +31,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/backup-tkgi.sh
-# code_snippet backup-tkgi end
+
+
+
diff --git a/tasks/check-pending-changes.sh b/tasks/_check-pending-changes-script.html.md.erb old mode 100755 new mode 100644 similarity index 68% rename from tasks/check-pending-changes.sh rename to tasks/_check-pending-changes-script.html.md.erb index 8bbd41aa..3a2026f8 --- a/tasks/check-pending-changes.sh +++ b/tasks/_check-pending-changes-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet check-pending-changes-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -13,4 +12,6 @@ fi
 # shellcheck disable=SC2068
 om --env env/"${ENV_FILE}" pending-changes \
   ${flags[@]}
-# code_snippet check-pending-changes-script end
+
+
+
diff --git a/tasks/check-pending-changes.yml b/tasks/_check-pending-changes.html.md.erb similarity index 56% rename from tasks/check-pending-changes.yml rename to tasks/_check-pending-changes.html.md.erb index ebcc9645..c4531017 100644 --- a/tasks/check-pending-changes.yml +++ b/tasks/_check-pending-changes.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet check-pending-changes start yaml +
+
 ---
 platform: linux
 
@@ -23,4 +19,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/check-pending-changes.sh
-# code_snippet check-pending-changes end
+
+
+
diff --git a/tasks/collect-telemetry.sh b/tasks/_collect-telemetry-script.html.md.erb old mode 100755 new mode 100644 similarity index 87% rename from tasks/collect-telemetry.sh rename to tasks/_collect-telemetry-script.html.md.erb index 251671f1..f93615c6 --- a/tasks/collect-telemetry.sh +++ b/tasks/_collect-telemetry-script.html.md.erb @@ -1,5 +1,5 @@ -#!/bin/bash -# code_snippet collect-telemetry-script start bash +
+
 set -eux
 
 cat config/"${CONFIG_FILE}" env/"${ENV_FILE}" >combined-config.yml
@@ -24,4 +24,6 @@ om --env env/"${ENV_FILE}" curl --path /api/v0/info >/dev/null 2>&1
 ./telemetry-collector-binary/telemetry-collector-linux-amd64 collect \
   --output-dir ./collected-telemetry-data \
   --config /tmp/pipe.yml
-# code_snippet collect-telemetry-script end bash
+
+
+
diff --git a/tasks/collect-telemetry.yml b/tasks/_collect-telemetry.html.md.erb similarity index 73% rename from tasks/collect-telemetry.yml rename to tasks/_collect-telemetry.html.md.erb index 069343c4..d5eeba17 100644 --- a/tasks/collect-telemetry.yml +++ b/tasks/_collect-telemetry.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet collect-telemetry start yaml +
+
 ---
 platform: linux
 
@@ -38,4 +34,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/collect-telemetry.sh
-# code_snippet collect-telemetry end
+
+
+
diff --git a/tasks/configure-authentication.sh b/tasks/_configure-authentication-script.html.md.erb old mode 100755 new mode 100644 similarity index 74% rename from tasks/configure-authentication.sh rename to tasks/_configure-authentication-script.html.md.erb index b186b72f..cc4684d0 --- a/tasks/configure-authentication.sh +++ b/tasks/_configure-authentication-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-authentication-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -14,4 +13,6 @@ done
 om --env env/"${ENV_FILE}" --skip-ssl-validation configure-authentication \
   --config config/"${AUTH_CONFIG_FILE}" \
   ${vars_files_args[@]}
-# code_snippet configure-authentication-script end
+
+
+
diff --git a/tasks/configure-authentication.yml b/tasks/_configure-authentication.html.md.erb similarity index 63% rename from tasks/configure-authentication.yml rename to tasks/_configure-authentication.html.md.erb index 2e6dc39b..68b1ebdf 100644 --- a/tasks/configure-authentication.yml +++ b/tasks/_configure-authentication.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-authentication start yaml +
+
 ---
 platform: linux
 
@@ -25,10 +21,12 @@ params:
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager vars yaml file
+  # - Filepath to the Ops Manager vars YAML file
   # - The path is relative to root of the task build
   # - These vars can come from the `env` or `config` inputs
 
 run:
   path: platform-automation-tasks/tasks/configure-authentication.sh
-# code_snippet configure-authentication end
+
+
+
diff --git a/tasks/configure-director.sh b/tasks/_configure-director-script.html.md.erb old mode 100755 new mode 100644 similarity index 82% rename from tasks/configure-director.sh rename to tasks/_configure-director-script.html.md.erb index f8ffde5c..df76d0f6 --- a/tasks/configure-director.sh +++ b/tasks/_configure-director-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-director-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -21,4 +20,6 @@ om --env env/"${ENV_FILE}" configure-director \
   --config "config/${DIRECTOR_CONFIG_FILE}" \
   ${vars_files_args[@]} \
   ${ops_files_args[@]}
-# code_snippet configure-director-script end
+
+
+
diff --git a/tasks/configure-director.yml b/tasks/_configure-director.html.md.erb similarity index 66% rename from tasks/configure-director.yml rename to tasks/_configure-director.html.md.erb index fbb1d850..57f62728 100644 --- a/tasks/configure-director.yml +++ b/tasks/_configure-director.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-director start yaml +
+
 ---
 platform: linux
 
@@ -23,13 +19,13 @@ inputs:
 params:
   VARS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager vars yaml file
+  # - Filepath to the Ops Manager vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
   OPS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager operations yaml files
+  # - Filepath to the Ops Manager operations YAML files
   # - The path is relative to root of the task build
 
   ENV_FILE: env.yml
@@ -39,9 +35,11 @@ params:
 
   DIRECTOR_CONFIG_FILE: director.yml
   # - Required
-  # - Filepath to the director configuration yaml file
+  # - Filepath to the director configuration YAML file
   # - The path is relative to the root of the `config` input
 
 run:
   path: platform-automation-tasks/tasks/configure-director.sh
-# code_snippet configure-director end
+
+
+
diff --git a/tasks/configure-ldap-authentication.sh b/tasks/_configure-ldap-authentication-script.html.md.erb old mode 100755 new mode 100644 similarity index 73% rename from tasks/configure-ldap-authentication.sh rename to tasks/_configure-ldap-authentication-script.html.md.erb index a621f66d..876fda50 --- a/tasks/configure-ldap-authentication.sh +++ b/tasks/_configure-ldap-authentication-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-ldap-authentication-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -14,4 +13,6 @@ done
 om --env env/"${ENV_FILE}" --skip-ssl-validation configure-ldap-authentication \
   --config config/"${AUTH_CONFIG_FILE}" \
   ${vars_files_args[@]}
-# code_snippet configure-ldap-authentication-script end
+
+
+
diff --git a/tasks/configure-ldap-authentication.yml b/tasks/_configure-ldap-authentication.html.md.erb similarity index 62% rename from tasks/configure-ldap-authentication.yml rename to tasks/_configure-ldap-authentication.html.md.erb index 6c6c0506..f5bca18f 100644 --- a/tasks/configure-ldap-authentication.yml +++ b/tasks/_configure-ldap-authentication.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-ldap-authentication start yaml +
+
 ---
 platform: linux
 
@@ -25,10 +21,12 @@ params:
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager vars yaml file
+  # - Filepath to the Ops Manager vars YAML file
   # - The path is relative to root of the task build
   # - These vars can come from the `env` or `config` inputs
 
 run:
   path: platform-automation-tasks/tasks/configure-ldap-authentication.sh
-# code_snippet configure-ldap-authentication end
+
+
+
diff --git a/tasks/configure-new-certificate-authority.sh b/tasks/_configure-new-certificate-authority-script.html.md.erb old mode 100755 new mode 100644 similarity index 70% rename from tasks/configure-new-certificate-authority.sh rename to tasks/_configure-new-certificate-authority-script.html.md.erb index b76230df..6e5cba06 --- a/tasks/configure-new-certificate-authority.sh +++ b/tasks/_configure-new-certificate-authority-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-new-certificate-authority-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -12,4 +11,6 @@ if [[ -f "certs/certificate.pem" && -f "certs/privatekey.pem" ]]; then
 else
   om --env env/"${ENV_FILE}" generate-certificate-authority
 fi
-# code_snippet configure-new-certificate-authority-script end
+
+
+
diff --git a/tasks/configure-new-certificate-authority.yml b/tasks/_configure-new-certificate-authority.html.md.erb similarity index 55% rename from tasks/configure-new-certificate-authority.yml rename to tasks/_configure-new-certificate-authority.html.md.erb index 6992e7fb..60baf1ad 100644 --- a/tasks/configure-new-certificate-authority.yml +++ b/tasks/_configure-new-certificate-authority.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-new-certificate-authority start yaml +
+
 ---
 platform: linux
 
@@ -21,4 +17,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/configure-new-certificate-authority.sh
-# code_snippet configure-new-certificate-authority end
\ No newline at end of file
+
+
+
diff --git a/tasks/configure-opsman.sh b/tasks/_configure-opsman-script.html.md.erb old mode 100755 new mode 100644 similarity index 75% rename from tasks/configure-opsman.sh rename to tasks/_configure-opsman-script.html.md.erb index b41b7e90..0ea74b4c --- a/tasks/configure-opsman.sh +++ b/tasks/_configure-opsman-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-opsman-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -15,4 +14,5 @@ om --env env/"${ENV_FILE}" configure-opsman \
   --config "config/${OPSMAN_CONFIG_FILE}" \
   ${vars_files_args[@]}
 
-# code_snippet configure-opsman-script end
+
+
diff --git a/tasks/configure-opsman.yml b/tasks/_configure-opsman.html.md.erb similarity index 73% rename from tasks/configure-opsman.yml rename to tasks/_configure-opsman.html.md.erb index 854f61a1..fb7cc1eb 100644 --- a/tasks/configure-opsman.yml +++ b/tasks/_configure-opsman.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-opsman start yaml +
+
 ---
 platform: linux
 
@@ -35,9 +31,11 @@ params:
   # - filepath of the Ops Manager Application Settings
   #   config file. (such as banner, pivnet token, etc)
   # - relative to root of the `config` input
-  # - It is recommended to use one config file to
+  # - VMware recommends using one config file to
   #   configure-opsman, upgrade-opsman, create-vm, delete-vm
 
 run:
   path: platform-automation-tasks/tasks/configure-opsman.sh
-# code_snippet configure-opsman end
+
+
+
diff --git a/tasks/configure-product.sh b/tasks/_configure-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 81% rename from tasks/configure-product.sh rename to tasks/_configure-product-script.html.md.erb index fdbaf06d..2edab52c --- a/tasks/configure-product.sh +++ b/tasks/_configure-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-product-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -21,4 +20,6 @@ om --env env/"${ENV_FILE}" configure-product \
   --config "config/${CONFIG_FILE}" \
   ${vars_files_args[@]} \
   ${ops_files_args[@]}
-# code_snippet configure-product-script end
+
+
+
diff --git a/tasks/configure-product.yml b/tasks/_configure-product.html.md.erb similarity index 65% rename from tasks/configure-product.yml rename to tasks/_configure-product.html.md.erb index ec2deb6a..c54a884d 100644 --- a/tasks/configure-product.yml +++ b/tasks/_configure-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-product start yaml +
+
 ---
 platform: linux
 
@@ -23,18 +19,18 @@ inputs:
 params:
   CONFIG_FILE:
   # - Required
-  # - Filepath to the product configuration yaml file
+  # - Filepath to the product configuration YAML file
   # - The path is relative to the root of the `config` input
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the product configuration vars yaml file
+  # - Filepath to the product configuration vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
   OPS_FILES:
   # - Optional
-  # - Filepath to the product configuration operations yaml files
+  # - Filepath to the product configuration operations YAML files
   # - The path is relative to root of the task build
 
   ENV_FILE: env.yml
@@ -44,4 +40,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/configure-product.sh
-# code_snippet configure-product end
+
+
+
diff --git a/tasks/configure-saml-authentication.sh b/tasks/_configure-saml-authentication-script.html.md.erb old mode 100755 new mode 100644 similarity index 73% rename from tasks/configure-saml-authentication.sh rename to tasks/_configure-saml-authentication-script.html.md.erb index b11ab067..ed676327 --- a/tasks/configure-saml-authentication.sh +++ b/tasks/_configure-saml-authentication-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet configure-saml-authentication-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -14,4 +13,6 @@ done
 om --env env/"${ENV_FILE}" --skip-ssl-validation configure-saml-authentication \
   --config config/"${AUTH_CONFIG_FILE}" \
   ${vars_files_args[@]}
-# code_snippet configure-saml-authentication-script end
+
+
+
diff --git a/tasks/configure-saml-authentication.yml b/tasks/_configure-saml-authentication.html.md.erb similarity index 62% rename from tasks/configure-saml-authentication.yml rename to tasks/_configure-saml-authentication.html.md.erb index 83646ab2..50760755 100644 --- a/tasks/configure-saml-authentication.yml +++ b/tasks/_configure-saml-authentication.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet configure-saml-authentication start yaml +
+
 ---
 platform: linux
 
@@ -25,10 +21,12 @@ params:
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager vars yaml file
+  # - Filepath to the Ops Manager vars YAML file
   # - The path is relative to root of the task build
   # - These vars can come from the `env` or `config` inputs
 
 run:
   path: platform-automation-tasks/tasks/configure-saml-authentication.sh
-# code_snippet configure-saml-authentication end
+
+
+
diff --git a/tasks/create-vm.sh b/tasks/_create-vm-script.html.md.erb old mode 100755 new mode 100644 similarity index 93% rename from tasks/create-vm.sh rename to tasks/_create-vm-script.html.md.erb index b4d2deb4..880ecf0c --- a/tasks/create-vm.sh +++ b/tasks/_create-vm-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet create-vm-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -42,5 +41,5 @@ om vm-lifecycle create-vm \
 # input_state_file need to be globbed (SC2086)
 # shellcheck disable=SC2086
 cp state/${input_state_file} "generated-state/${generated_state_file_name}"
-
-# code_snippet create-vm-script end
+
+
diff --git a/tasks/create-vm.yml b/tasks/_create-vm.html.md.erb similarity index 76% rename from tasks/create-vm.yml rename to tasks/_create-vm.html.md.erb index d132b821..04606221 100644 --- a/tasks/create-vm.yml +++ b/tasks/_create-vm.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet create-vm start yaml +
+
 ---
 platform: linux
 
@@ -26,7 +22,7 @@ outputs:
 params:
   VARS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager vars yaml file
+  # - Filepath to the Ops Manager vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
@@ -37,7 +33,7 @@ params:
 
   STATE_FILE: state.yml
   # - Required
-  # - Filepath of the state yaml file
+  # - Filepath of the state YAML file
   # - The path is relative to root of the `state` output
   # - if the filename includes "$timestamp",
   #   for example "state-$timestamp.yml",
@@ -50,4 +46,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/create-vm.sh
-# code_snippet create-vm end
+
+
+
diff --git a/tasks/credhub-interpolate.sh b/tasks/_credhub-interpolate-script.html.md.erb old mode 100755 new mode 100644 similarity index 88% rename from tasks/credhub-interpolate.sh rename to tasks/_credhub-interpolate-script.html.md.erb index 7bb4ffb8..5c17affb --- a/tasks/credhub-interpolate.sh +++ b/tasks/_credhub-interpolate-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet credhub-interpolate-script start bash - +
+
 cat /var/version && echo ""
 set -euo pipefail
 
@@ -36,5 +35,5 @@ for file in ${files}; do
     --file files/"${file}" ${flags[@]} \
     >interpolated-files/"${file}"
 done
-
-# code_snippet credhub-interpolate-script end
+
+
diff --git a/tasks/credhub-interpolate.yml b/tasks/_credhub-interpolate.html.md.erb similarity index 75% rename from tasks/credhub-interpolate.yml rename to tasks/_credhub-interpolate.html.md.erb index f00c6a8f..e43b3d0c 100644 --- a/tasks/credhub-interpolate.yml +++ b/tasks/_credhub-interpolate.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet credhub-interpolate start yaml +
+
 ---
 platform: linux
 
@@ -17,7 +13,7 @@ inputs:
 
 outputs:
 - name: interpolated-files
-# Contains only yaml files found and interpolated by this task.
+# Contains only YAML files found and interpolated by this task.
 # Maintains the filestructure of the `files` input.
 
 # all params are required to be filled out
@@ -32,7 +28,7 @@ params:
   CREDHUB_CA_CERT:
   # - Optional
   # - This is only necessary if your Concourse worker
-  #   is not already configured to trust the CA used for Credhub.
+  #   is not already configured to trust the CA used for CredHub.
   # - If more than one CA cert is required (ie the UAA),
   #   the CA certs can be concatenated together and separated by a newline.
   #   For example,
@@ -61,4 +57,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/credhub-interpolate.sh
-# code_snippet credhub-interpolate end
+
+
+
diff --git a/tasks/_delete-certificate-authority-script.html.md.erb b/tasks/_delete-certificate-authority-script.html.md.erb new file mode 100644 index 00000000..7d34573f --- /dev/null +++ b/tasks/_delete-certificate-authority-script.html.md.erb @@ -0,0 +1,8 @@ +
+
+cat /var/version && echo ""
+set -eux
+
+om --env env/"${ENV_FILE}" delete-certificate-authority --all-inactive
+
+
diff --git a/tasks/_delete-certificate-authority.html.md.erb b/tasks/_delete-certificate-authority.html.md.erb new file mode 100644 index 00000000..7a5ca294 --- /dev/null +++ b/tasks/_delete-certificate-authority.html.md.erb @@ -0,0 +1,19 @@ +
+
+---
+platform: linux
+
+inputs:
+- name: platform-automation-tasks
+- name: env # contains the env file with target OpsMan Information
+
+params:
+  ENV_FILE: env.yml
+  # - Required
+  # - Filepath of the env config YAML
+
+run:
+  path: platform-automation-tasks/tasks/delete-certificate-authority.sh
+
+
+
diff --git a/tasks/_delete-installation-script.html.md.erb b/tasks/_delete-installation-script.html.md.erb new file mode 100644 index 00000000..835eb735 --- /dev/null +++ b/tasks/_delete-installation-script.html.md.erb @@ -0,0 +1,7 @@ +
+
+cat /var/version && echo ""
+set -eux
+om --env env/"${ENV_FILE}" delete-installation --force
+
+
diff --git a/tasks/delete-installation.yml b/tasks/_delete-installation.html.md.erb similarity index 50% rename from tasks/delete-installation.yml rename to tasks/_delete-installation.html.md.erb index 48b11a94..95a68e4e 100644 --- a/tasks/delete-installation.yml +++ b/tasks/_delete-installation.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet delete-installation start yaml +
+
 ---
 platform: linux
 
@@ -19,4 +15,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/delete-installation.sh
-# code_snippet delete-installation end
+
+
+
diff --git a/tasks/delete-vm.sh b/tasks/_delete-vm-script.html.md.erb old mode 100755 new mode 100644 similarity index 92% rename from tasks/delete-vm.sh rename to tasks/_delete-vm-script.html.md.erb index 6671bed7..72afad1e --- a/tasks/delete-vm.sh +++ b/tasks/_delete-vm-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet delete-vm-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -31,5 +30,5 @@ om vm-lifecycle delete-vm \
 # input_state_file need to be globbed (SC2086)
 # shellcheck disable=SC2086
 cp state/${input_state_file} "generated-state/${generated_state_file_name}"
-
-# code_snippet delete-vm-script end
+
+
diff --git a/tasks/delete-vm.yml b/tasks/_delete-vm.html.md.erb similarity index 76% rename from tasks/delete-vm.yml rename to tasks/_delete-vm.html.md.erb index 173bc273..03a54ae2 100644 --- a/tasks/delete-vm.yml +++ b/tasks/_delete-vm.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet delete-vm start yaml +
+
 ---
 platform: linux
 
@@ -24,7 +20,7 @@ outputs:
 params:
   VARS_FILES:
   # - Optional
-  # - Filepath to the Ops Manager vars yaml file
+  # - Filepath to the Ops Manager vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
@@ -35,7 +31,7 @@ params:
 
   STATE_FILE: state.yml
   # - Required
-  # - Filepath of the state yaml file
+  # - Filepath of the state YAML file
   # - The path is relative to root of the `state` output
   # - if the filename includes "$timestamp",
   #   for example "state-$timestamp.yml",
@@ -48,4 +44,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/delete-vm.sh
-# code_snippet delete-vm end
+
+
+
diff --git a/tasks/download-and-upload-product.sh b/tasks/_download-and-upload-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 91% rename from tasks/download-and-upload-product.sh rename to tasks/_download-and-upload-product-script.html.md.erb index e44412e0..e7a7ad4e --- a/tasks/download-and-upload-product.sh +++ b/tasks/_download-and-upload-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet download-and-upload-product-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -48,4 +47,5 @@ if [ "${downloaded_stemcell}" != "" ]; then
   om --env env/"${ENV_FILE}" upload-stemcell \
     --stemcell "${downloaded_stemcell}" "${floatingArg}"
 fi
-# code_snippet download-and-upload-product-script end
+
+
diff --git a/tasks/download-and-upload-product.yml b/tasks/_download-and-upload-product.html.md.erb similarity index 68% rename from tasks/download-and-upload-product.yml rename to tasks/_download-and-upload-product.html.md.erb index 15ed85bd..7b19304d 100644 --- a/tasks/download-and-upload-product.yml +++ b/tasks/_download-and-upload-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet download-and-upload-product start yaml +
+
 ---
 platform: linux
 
@@ -25,12 +21,12 @@ params:
 
   CONFIG_FILE: download-config.yml
   # - Required
-  # - Filepath to the product configuration yaml file
+  # - Filepath to the product configuration YAML file
   # - The path is relative to the root of the `config` input
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the product configuration vars yaml file
+  # - Filepath to the product configuration vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
@@ -41,4 +37,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/download-and-upload-product.sh
-# code_snippet download-and-upload-product end
+
+
+
diff --git a/tasks/download-product.sh b/tasks/_download-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 92% rename from tasks/download-product.sh rename to tasks/_download-product-script.html.md.erb index 02a9cbb6..071e1c0e --- a/tasks/download-product.sh +++ b/tasks/_download-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet download-product-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -49,4 +48,5 @@ if [ -e downloaded-product/assign-stemcell.yml ]; then
 fi
 
 rm -f downloaded-product/download-file.json
-# code_snippet download-product-script end
+
+
diff --git a/tasks/download-product.yml b/tasks/_download-product.html.md.erb similarity index 67% rename from tasks/download-product.yml rename to tasks/_download-product.html.md.erb index 24e40c81..27cd64b5 100644 --- a/tasks/download-product.yml +++ b/tasks/_download-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet download-product start yaml +
+
 ---
 platform: linux
 
@@ -29,12 +25,12 @@ caches:
 params:
   CONFIG_FILE: download-config.yml
   # - Required
-  # - Filepath to the product configuration yaml file
+  # - Filepath to the product configuration YAML file
   # - The path is relative to the root of the `config` input
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the product configuration vars yaml file
+  # - Filepath to the product configuration vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
@@ -45,4 +41,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/download-product.sh
-# code_snippet download-product end
+
+
+
diff --git a/tasks/expiring-certificates.sh b/tasks/_expiring-certificates-script.html.md.erb old mode 100755 new mode 100644 similarity index 64% rename from tasks/expiring-certificates.sh rename to tasks/_expiring-certificates-script.html.md.erb index 9e1440de..18cea390 --- a/tasks/expiring-certificates.sh +++ b/tasks/_expiring-certificates-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet expiring-certificates-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -11,4 +10,6 @@ fi
 
 om --env env/"${ENV_FILE}" expiring-certificates \
   --expires-within "${EXPIRES_WITHIN}"
-# code_snippet expiring-certificates-script end
+
+
+
diff --git a/tasks/expiring-certificates.yml b/tasks/_expiring-certificates.html.md.erb similarity index 62% rename from tasks/expiring-certificates.yml rename to tasks/_expiring-certificates.html.md.erb index 62cae820..8ebc8b17 100644 --- a/tasks/expiring-certificates.yml +++ b/tasks/_expiring-certificates.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet expiring-certificates start yaml +
+
 ---
 platform: linux
 
@@ -26,4 +22,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/expiring-certificates.sh
-# code_snippet expiring-certificates end
\ No newline at end of file
+
+
+
\ No newline at end of file diff --git a/tasks/export-installation.sh b/tasks/_export-installation-script.html.md.erb old mode 100755 new mode 100644 similarity index 78% rename from tasks/export-installation.sh rename to tasks/_export-installation-script.html.md.erb index a4c3b2b6..468d41d2 --- a/tasks/export-installation.sh +++ b/tasks/_export-installation-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet export-installation-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -14,4 +13,6 @@ output_file_name="$(echo "${INSTALLATION_FILE}" | envsubst '$timestamp')"
 
 om --env env/"${ENV_FILE}" export-installation \
   --output-file installation/"${output_file_name}"
-# code_snippet export-installation-script end
+
+
+
diff --git a/tasks/export-installation.yml b/tasks/_export-installation.html.md.erb similarity index 75% rename from tasks/export-installation.yml rename to tasks/_export-installation.html.md.erb index 1fa86209..b717ae85 100644 --- a/tasks/export-installation.yml +++ b/tasks/_export-installation.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet export-installation start yaml +
+
 ---
 platform: linux
 
@@ -35,4 +31,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/export-installation.sh
-# code_snippet export-installation end
\ No newline at end of file
+
+
+
diff --git a/tasks/generate-certificate.sh b/tasks/_generate-certificate-script.html.md.erb old mode 100755 new mode 100644 similarity index 70% rename from tasks/generate-certificate.sh rename to tasks/_generate-certificate-script.html.md.erb index 7c4d7796..04121560 --- a/tasks/generate-certificate.sh +++ b/tasks/_generate-certificate-script.html.md.erb @@ -1,10 +1,10 @@ -#!/usr/bin/env bash -# code_snippet generate-certificate-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
 om --env env/"${ENV_FILE}" generate-certificate -d "${DOMAINS}" > /tmp/certificate.json
 om interpolate -c /tmp/certificate.json --path /certificate > certificate/certificate.pem
 om interpolate -c /tmp/certificate.json --path /key > certificate/privatekey.pem
-# code_snippet generate-certificate-script end
+
+
diff --git a/tasks/generate-certificate.yml b/tasks/_generate-certificate.html.md.erb similarity index 60% rename from tasks/generate-certificate.yml rename to tasks/_generate-certificate.html.md.erb index d91e209e..12aff170 100644 --- a/tasks/generate-certificate.yml +++ b/tasks/_generate-certificate.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet generate-certificate start yaml +
+
 ---
 platform: linux
 
@@ -26,4 +22,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/generate-certificate.sh
-# code_snippet generate-certificate end
\ No newline at end of file
+
+
+
diff --git a/tasks/import-installation.sh b/tasks/_import-installation-script.html.md.erb old mode 100755 new mode 100644 similarity index 65% rename from tasks/import-installation.sh rename to tasks/_import-installation-script.html.md.erb index eb8d3cc3..6db21167 --- a/tasks/import-installation.sh +++ b/tasks/_import-installation-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet import-installation-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -8,4 +7,6 @@ set -eux
 # shellcheck disable=SC2086
 om --env env/"${ENV_FILE}" --skip-ssl-validation import-installation \
   --installation installation/${INSTALLATION_FILE}
-# code_snippet import-installation-script end
+
+
+
diff --git a/tasks/import-installation.yml b/tasks/_import-installation.html.md.erb similarity index 69% rename from tasks/import-installation.yml rename to tasks/_import-installation.html.md.erb index 47b88a1c..a45f3ed0 100644 --- a/tasks/import-installation.yml +++ b/tasks/_import-installation.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet import-installation start yaml +
+
 ---
 platform: linux
 
@@ -28,4 +24,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/import-installation.sh
-# code_snippet import-installation end
\ No newline at end of file
+
+
+
diff --git a/tasks/make-git-commit.sh b/tasks/_make-git-commit-script.html.md.erb old mode 100755 new mode 100644 similarity index 85% rename from tasks/make-git-commit.sh rename to tasks/_make-git-commit-script.html.md.erb index 5f56f6b6..6aa0319b --- a/tasks/make-git-commit.sh +++ b/tasks/_make-git-commit-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet make-git-commit-script start bash - +
+
 cat /var/version && echo ""
 set -eu
 
@@ -23,4 +22,5 @@ if [[ -n $(git status --porcelain) ]]; then
   git add -A
   git commit -m "${COMMIT_MESSAGE}" --allow-empty
 fi
-# code_snippet make-git-commit-script end
+
+
diff --git a/tasks/make-git-commit.yml b/tasks/_make-git-commit.html.md.erb similarity index 79% rename from tasks/make-git-commit.yml rename to tasks/_make-git-commit.html.md.erb index cf76195a..acf3ebd4 100644 --- a/tasks/make-git-commit.yml +++ b/tasks/_make-git-commit.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet make-git-commit start yaml +
+
 ---
 platform: linux
 
@@ -51,4 +47,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/make-git-commit.sh
-# code_snippet make-git-commit end
+
+
+
diff --git a/tasks/_pre-deploy-check-script.html.md.erb b/tasks/_pre-deploy-check-script.html.md.erb new file mode 100644 index 00000000..cd007485 --- /dev/null +++ b/tasks/_pre-deploy-check-script.html.md.erb @@ -0,0 +1,8 @@ +
+
+cat /var/version && echo ""
+set -eux
+
+om --env env/"${ENV_FILE}" pre-deploy-check
+
+
diff --git a/tasks/pre-deploy-check.yml b/tasks/_pre-deploy-check.html.md.erb similarity index 50% rename from tasks/pre-deploy-check.yml rename to tasks/_pre-deploy-check.html.md.erb index dbdbb375..0e373ef6 100644 --- a/tasks/pre-deploy-check.yml +++ b/tasks/_pre-deploy-check.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet pre-deploy-check start yaml +
+
 ---
 platform: linux
 
@@ -19,4 +15,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/pre-deploy-check.sh
-# code_snippet pre-deploy-check end
+
+
+
diff --git a/tasks/prepare-image.sh b/tasks/_prepare-image-script.html.md.erb old mode 100755 new mode 100644 similarity index 88% rename from tasks/prepare-image.sh rename to tasks/_prepare-image-script.html.md.erb index c7f93f6d..a13c532f --- a/tasks/prepare-image.sh +++ b/tasks/_prepare-image-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet prepare-image-script start bash - +
+
 cat /var/version && echo ""
 set -eu
 
@@ -27,5 +26,5 @@ update-ca-certificates
 rsync -al /etc/ssl/certs/ "${PWD}"/platform-automation-image/rootfs/etc/ssl/certs
 rsync -al /usr/local/share/ca-certificates/ "${PWD}"/platform-automation-image/rootfs/usr/local/share/ca-certificates
 rsync -al /usr/share/ca-certificates/ "${PWD}"/platform-automation-image/rootfs/usr/share/ca-certificates
-
-# code_snippet prepare-image-script end
+
+
diff --git a/tasks/prepare-image.yml b/tasks/_prepare-image.html.md.erb similarity index 65% rename from tasks/prepare-image.yml rename to tasks/_prepare-image.html.md.erb index af3acf70..7f25bfc3 100644 --- a/tasks/prepare-image.yml +++ b/tasks/_prepare-image.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet prepare-image start yaml +
+
 ---
 platform: linux
 
@@ -30,4 +26,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/prepare-image.sh
-# code_snippet prepare-image end
+
+
+
diff --git a/tasks/prepare-tasks-with-secrets.sh b/tasks/_prepare-tasks-with-secrets-script.html.md.erb old mode 100755 new mode 100644 similarity index 81% rename from tasks/prepare-tasks-with-secrets.sh rename to tasks/_prepare-tasks-with-secrets-script.html.md.erb index 1310d393..cf47f58c --- a/tasks/prepare-tasks-with-secrets.sh +++ b/tasks/_prepare-tasks-with-secrets-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet prepare-tasks-with-secrets-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -26,4 +25,5 @@ om vm-lifecycle prepare-tasks-with-secrets \
   ${config_file_args[@]} \
   ${vars_file_args[@]}
 
-# code_snippet prepare-tasks-with-secrets-script end
+
+
diff --git a/tasks/prepare-tasks-with-secrets.yml b/tasks/_prepare-tasks-with-secrets.html.md.erb similarity index 82% rename from tasks/prepare-tasks-with-secrets.yml rename to tasks/_prepare-tasks-with-secrets.html.md.erb index f649ba26..e20762cd 100644 --- a/tasks/prepare-tasks-with-secrets.yml +++ b/tasks/_prepare-tasks-with-secrets.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet prepare-tasks-with-secrets start yaml +
+
 ---
 platform: linux
 
@@ -53,4 +49,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/prepare-tasks-with-secrets.sh
-# code_snippet prepare-tasks-with-secrets end
+
+
+
diff --git a/tasks/_regenerate-certificates-script.html.md.erb b/tasks/_regenerate-certificates-script.html.md.erb new file mode 100644 index 00000000..d566f342 --- /dev/null +++ b/tasks/_regenerate-certificates-script.html.md.erb @@ -0,0 +1,8 @@ +
+
+cat /var/version && echo ""
+set -eux
+
+om --env env/"${ENV_FILE}" regenerate-certificates
+
+
diff --git a/tasks/_regenerate-certificates.html.md.erb b/tasks/_regenerate-certificates.html.md.erb new file mode 100644 index 00000000..c53bca59 --- /dev/null +++ b/tasks/_regenerate-certificates.html.md.erb @@ -0,0 +1,20 @@ +
+
+---
+platform: linux
+
+inputs:
+- name: platform-automation-tasks
+- name: env # contains the env file with target OpsMan Information
+
+params:
+  ENV_FILE: env.yml
+  # - Required
+  # - Filepath of the env config YAML
+  # - The path is relative to root of the `env` input
+
+run:
+  path: platform-automation-tasks/tasks/regenerate-certificates.sh
+
+
+
\ No newline at end of file diff --git a/tasks/replicate-product.sh b/tasks/_replicate-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 68% rename from tasks/replicate-product.sh rename to tasks/_replicate-product-script.html.md.erb index 20d35b1f..cd9259b3 --- a/tasks/replicate-product.sh +++ b/tasks/_replicate-product-script.html.md.erb @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -# code_snippet replicate-product-script start bash +
+
 cat /var/version && echo ""
 set -eux
 
@@ -11,4 +11,6 @@ fi
 iso-replicator -name "${REPLICATED_NAME}" \
   -output "replicated-product/${REPLICATED_NAME}.pivotal" \
   -path product/*.pivotal
-# code_snippet replicate-product-script end bash
+
+
+
diff --git a/tasks/replicate-product.yml b/tasks/_replicate-product.html.md.erb similarity index 56% rename from tasks/replicate-product.yml rename to tasks/_replicate-product.html.md.erb index 62e7cb60..6cf4d110 100644 --- a/tasks/replicate-product.yml +++ b/tasks/_replicate-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet replicate-product start yaml +
+
 ---
 platform: linux
 
@@ -23,4 +19,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/replicate-product.sh
-# code_snippet replicate-product end
+
+
+
diff --git a/tasks/_revert-staged-changes-script.html.md.erb b/tasks/_revert-staged-changes-script.html.md.erb new file mode 100644 index 00000000..f05218d0 --- /dev/null +++ b/tasks/_revert-staged-changes-script.html.md.erb @@ -0,0 +1,8 @@ +
+
+cat /var/version && echo ""
+set -eux
+
+om --env env/"${ENV_FILE}" revert-staged-changes
+
+
diff --git a/tasks/_revert-staged-changes.html.md.erb b/tasks/_revert-staged-changes.html.md.erb new file mode 100644 index 00000000..4f6a317b --- /dev/null +++ b/tasks/_revert-staged-changes.html.md.erb @@ -0,0 +1,20 @@ +
+
+---
+platform: linux
+
+inputs:
+- name: platform-automation-tasks
+- name: env # contains the env file with target OpsMan Information
+
+params:
+  ENV_FILE: env.yml
+  # - Required
+  # - Filepath of the env config YAML
+  # - The path is relative to root of the `env` input
+
+run:
+  path: platform-automation-tasks/tasks/revert-staged-changes.sh
+
+
+
diff --git a/tasks/run-bosh-errand.sh b/tasks/_run-bosh-errand-script.html.md.erb old mode 100755 new mode 100644 similarity index 88% rename from tasks/run-bosh-errand.sh rename to tasks/_run-bosh-errand-script.html.md.erb index 6a9fabcf..f33efe2c --- a/tasks/run-bosh-errand.sh +++ b/tasks/_run-bosh-errand-script.html.md.erb @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -# code_snippet run-bosh-errand-script start bash +
+
 cat /var/version && echo ""
 set -eux
 
@@ -24,4 +24,5 @@ if [ -z "${INSTANCE}" ]; then
 else
   bosh -d "${installation}" run-errand "${ERRAND_NAME}" --instance "${INSTANCE}"
 fi
-# code_snippet run-bosh-errand-script end
+
+
diff --git a/tasks/run-bosh-errand.yml b/tasks/_run-bosh-errand.html.md.erb similarity index 80% rename from tasks/run-bosh-errand.yml rename to tasks/_run-bosh-errand.html.md.erb index d1d881f1..465feb69 100644 --- a/tasks/run-bosh-errand.yml +++ b/tasks/_run-bosh-errand.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet run-bosh-errand start yaml +
+
 ---
 platform: linux
 
@@ -32,7 +28,7 @@ params:
   # - Optional
   # - May be required to communicate with the Ops Manager BOSH director
   #   if your Concourse worker doesn't otherwise have a route
-  #   to your bosh director.
+  #   to your BOSH Director.
   # - This is the private key for the Ops Manager VM
   #   (used during VM creation)
 
@@ -54,4 +50,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/run-bosh-errand.sh
-# code_snippet run-bosh-errand end
+
+
+
diff --git a/tasks/send-telemetry.sh b/tasks/_send-telemetry-script.html.md.erb old mode 100755 new mode 100644 similarity index 71% rename from tasks/send-telemetry.sh rename to tasks/_send-telemetry-script.html.md.erb index de62ca25..f4fb142c --- a/tasks/send-telemetry.sh +++ b/tasks/_send-telemetry-script.html.md.erb @@ -1,5 +1,5 @@ -#!/bin/bash -# code_snippet send-telemetry-script start bash +
+
 set -eux
 
 ./telemetry-collector-binary/telemetry-collector-linux-amd64 --version
@@ -8,4 +8,6 @@ set -eux
 # shellcheck disable=SC2086
 ./telemetry-collector-binary/telemetry-collector-linux-amd64 send \
   --path ${DATA_FILE_PATH}
-# code_snippet send-telemetry-script end
+
+
+
diff --git a/tasks/_send-telemetry.html.md.erb b/tasks/_send-telemetry.html.md.erb new file mode 100644 index 00000000..39b10d3a --- /dev/null +++ b/tasks/_send-telemetry.html.md.erb @@ -0,0 +1,23 @@ +
+
+---
+platform: linux
+
+inputs:
+- name: platform-automation-tasks
+- name: telemetry-collector-binary
+- name: collected-telemetry-data
+
+params:
+  API_KEY:
+  # required
+  # The API key provided by Pivotal after accepting the EULA
+
+  DATA_FILE_PATH:
+  # required
+
+run:
+  path: platform-automation-tasks/tasks/send-telemetry.sh
+
+
+
diff --git a/tasks/setup-bosh-env.sh b/tasks/_setup-bosh-env-script.html.md.erb old mode 100755 new mode 100644 similarity index 96% rename from tasks/setup-bosh-env.sh rename to tasks/_setup-bosh-env-script.html.md.erb index 7c5bed8f..236832a9 --- a/tasks/setup-bosh-env.sh +++ b/tasks/_setup-bosh-env-script.html.md.erb @@ -1,3 +1,5 @@ +
+
 set +x
 if [ -n "${OPSMAN_SSH_PRIVATE_KEY}" ]; then
   eval "$(om --env env/"${ENV_FILE}" bosh-env)"
@@ -26,3 +28,5 @@ if [ -n "${OPSMAN_SSH_PRIVATE_KEY}" ]; then
 else
   eval "$(om --env env/"${ENV_FILE}" bosh-env)"
 fi
+
+
diff --git a/tasks/stage-configure-apply.sh b/tasks/_stage-configure-apply-script.html.md.erb old mode 100755 new mode 100644 similarity index 94% rename from tasks/stage-configure-apply.sh rename to tasks/_stage-configure-apply-script.html.md.erb index 1bf7437c..063f1a5a --- a/tasks/stage-configure-apply.sh +++ b/tasks/_stage-configure-apply-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet stage-configure-apply-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -77,4 +76,6 @@ om --env env/"${ENV_FILE}" \
   apply-changes \
   --product-name "${product_name}" \
   ${flags[@]}
-# code_snippet stage-configure-apply-script end
+
+
+
diff --git a/tasks/stage-configure-apply.yml b/tasks/_stage-configure-apply.html.md.erb similarity index 82% rename from tasks/stage-configure-apply.yml rename to tasks/_stage-configure-apply.html.md.erb index b8185342..20455c60 100644 --- a/tasks/stage-configure-apply.yml +++ b/tasks/_stage-configure-apply.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet stage-configure-apply start yaml +
+
 ---
 platform: linux
 
@@ -24,7 +20,7 @@ inputs:
 - name: stemcell # contains the stemcell tarball
   optional: true
   # - The stemcell filename is important and must be preserved.
-  #   if using the bosh.io concourse resource,
+  #   if using the bosh.io Concourse resource,
   #   set `params.preserve_filename: true` on your GET.
 - name: assign-stemcell-config # contains the configuration file for assign-stemcell command
   optional: true
@@ -37,7 +33,7 @@ inputs:
 params:
   CONFIG_FILE:
   # - Required
-  # - Filepath to the product configuration yaml file
+  # - Filepath to the product configuration YAML file
   # - The path is relative to the root of the `config` input
 
   STAGE_PRODUCT_CONFIG_FILE:
@@ -73,13 +69,13 @@ params:
 
   VARS_FILES:
   # - Optional
-  # - Filepath to the product configuration vars yaml file
+  # - Filepath to the product configuration vars YAML file
   # - The path is relative to root of the task build,
   #   so `vars` and `secrets` can be used.
 
   OPS_FILES:
   # - Optional
-  # - Filepath to the product configuration operations yaml files
+  # - Filepath to the product configuration operations YAML files
   # - The path is relative to root of the task build
 
   ENV_FILE: env.yml
@@ -93,8 +89,8 @@ params:
 
   RECREATE: false
   # - Optional
-  # - If true, will recreate the vms for the product
-  # - If true, will also recreate the director vm if there are changes
+  # - If true, will recreate the VMs for the product
+  # - If true, will also recreate the director VM if there are changes
 
   ERRAND_CONFIG_FILE:
   # - Optional
@@ -112,4 +108,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/stage-configure-apply.sh
-# code_snippet stage-configure-apply end
+
+
+
diff --git a/tasks/stage-product.sh b/tasks/_stage-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 92% rename from tasks/stage-product.sh rename to tasks/_stage-product-script.html.md.erb index 13dd1f4d..525a58bd --- a/tasks/stage-product.sh +++ b/tasks/_stage-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet stage-product-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -34,5 +33,5 @@ else
   om --env env/"${ENV_FILE}" stage-product \
     --config config/"${CONFIG_FILE}"
 fi
-
-# code_snippet stage-product-script end
+
+
diff --git a/tasks/stage-product.yml b/tasks/_stage-product.html.md.erb similarity index 70% rename from tasks/stage-product.yml rename to tasks/_stage-product.html.md.erb index 1660ac79..1e96a696 100644 --- a/tasks/stage-product.yml +++ b/tasks/_stage-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet stage-product start yaml +
+
 ---
 platform: linux
 
@@ -33,4 +29,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/stage-product.sh
-# code_snippet stage-product end
+
+
+
diff --git a/tasks/staged-config.sh b/tasks/_staged-config-script.html.md.erb old mode 100755 new mode 100644 similarity index 74% rename from tasks/staged-config.sh rename to tasks/_staged-config-script.html.md.erb index 514c64f9..1bdca802 --- a/tasks/staged-config.sh +++ b/tasks/_staged-config-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet staged-config-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -15,4 +14,6 @@ flag=$(
 om --env env/"${ENV_FILE}" staged-config \
   --product-name "${PRODUCT_NAME}" \
   "${flag}" >generated-config/"${PRODUCT_NAME}".yml
-# code_snippet staged-config-script end
+
+
+
diff --git a/tasks/staged-config.yml b/tasks/_staged-config.html.md.erb similarity index 68% rename from tasks/staged-config.yml rename to tasks/_staged-config.html.md.erb index 37edd19e..40a53d5d 100644 --- a/tasks/staged-config.yml +++ b/tasks/_staged-config.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet staged-config start yaml +
+
 ---
 platform: linux
 
@@ -31,4 +27,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/staged-config.sh
-# code_snippet staged-config end
+  
+
+
diff --git a/tasks/staged-director-config.sh b/tasks/_staged-director-config-script.html.md.erb old mode 100755 new mode 100644 similarity index 53% rename from tasks/staged-director-config.sh rename to tasks/_staged-director-config-script.html.md.erb index 59101744..84f48e22 --- a/tasks/staged-director-config.sh +++ b/tasks/_staged-director-config-script.html.md.erb @@ -1,8 +1,9 @@ -#!/usr/bin/env bash -# code_snippet staged-director-config-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 om --env env/"${ENV_FILE}" staged-director-config \
   --include-placeholders >generated-config/director.yml
-# code_snippet staged-director-config-script end
+
+
+
diff --git a/tasks/staged-director-config.yml b/tasks/_staged-director-config.html.md.erb similarity index 54% rename from tasks/staged-director-config.yml rename to tasks/_staged-director-config.html.md.erb index 937cfc25..1f0163dd 100644 --- a/tasks/staged-director-config.yml +++ b/tasks/_staged-director-config.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet staged-director-config start yaml +
+
 ---
 platform: linux
 
@@ -22,4 +18,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/staged-director-config.sh
-# code_snippet staged-director-config end
+
+
+
diff --git a/tasks/test-interpolate.sh b/tasks/_test-interpolate-script.html.md.erb old mode 100755 new mode 100644 similarity index 77% rename from tasks/test-interpolate.sh rename to tasks/_test-interpolate-script.html.md.erb index cc609900..897b5815 --- a/tasks/test-interpolate.sh +++ b/tasks/_test-interpolate-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet test-interpolate-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -17,4 +16,5 @@ fi
 # ${vars_files_args[@] needs to be globbed to pass through properly
 # shellcheck disable=SC2068
 om interpolate --config "config/${CONFIG_FILE}" ${flags[@]}
-# code_snippet test-interpolate-script end
+
+
diff --git a/tasks/test-interpolate.yml b/tasks/_test-interpolate.html.md.erb similarity index 59% rename from tasks/test-interpolate.yml rename to tasks/_test-interpolate.html.md.erb index 02acf673..1bdc9458 100644 --- a/tasks/test-interpolate.yml +++ b/tasks/_test-interpolate.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet test-interpolate start yaml +
+
 ---
 platform: linux
 
@@ -16,13 +12,13 @@ inputs:
 params:
   VARS_FILES:
   # - Optional
-  # - Filepath to the vars yaml file
+  # - Filepath to the vars YAML file
   # - The path is relative to root of the task build
   # - These vars can come from the `vars` or `config` inputs
 
   CONFIG_FILE: base.yml
   # - Required
-  # - Filepath to the base yaml file to interpolate from
+  # - Filepath to the base YAML file to interpolate from
   # - The path is relative to root of the task build
 
   SKIP_MISSING: true
@@ -32,4 +28,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/test-interpolate.sh
-# code_snippet test-interpolate end
+
+
+
diff --git a/tasks/_test-script.html.md.erb b/tasks/_test-script.html.md.erb new file mode 100644 index 00000000..3b7a2dd7 --- /dev/null +++ b/tasks/_test-script.html.md.erb @@ -0,0 +1,14 @@ +
+
+echo "Platform Automation for PCF version:"
+cat /var/version && echo ""
+
+printf "\\nom version:"
+om -v
+
+set -eux
+om vm-lifecycle --help
+om --help
+{ echo "Successfully validated tasks and image!"; } 2>/dev/null
+
+
diff --git a/tasks/_test.html.md.erb b/tasks/_test.html.md.erb new file mode 100644 index 00000000..e6513aa3 --- /dev/null +++ b/tasks/_test.html.md.erb @@ -0,0 +1,13 @@ +
+
+---
+platform: linux
+
+inputs:
+- name: platform-automation-tasks
+
+run:
+  path: platform-automation-tasks/tasks/test.sh
+
+
+
diff --git a/tasks/update-runtime-config.sh b/tasks/_update-runtime-config-script.html.md.erb old mode 100755 new mode 100644 similarity index 87% rename from tasks/update-runtime-config.sh rename to tasks/_update-runtime-config-script.html.md.erb index 2feae1f4..9f5ee4a8 --- a/tasks/update-runtime-config.sh +++ b/tasks/_update-runtime-config-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet update-runtime-config-script start bash - +
+
 cat /var/version && echo ""
 set -eu
 
@@ -37,4 +36,5 @@ bosh -n update-config \
   config/"${CONFIG_FILE}" \
   ${vars_files_args[@]}
 
-# code_snippet update-runtime-config-script end
+
+
diff --git a/tasks/update-runtime-config.yml b/tasks/_update-runtime-config.html.md.erb similarity index 79% rename from tasks/update-runtime-config.yml rename to tasks/_update-runtime-config.html.md.erb index ad462aed..324a94f1 100644 --- a/tasks/update-runtime-config.yml +++ b/tasks/_update-runtime-config.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet update-runtime-config start yaml +
+
 ---
 platform: linux
 
@@ -50,9 +46,11 @@ params:
 
   VARS_FILES:
   # - Optional
-  # - Filepaths of the product configuration vars yaml file
+  # - Filepaths of the product configuration vars YAML file
   # - The path is relative to the root of the task build,
   #   so `vars` can be used.
 run:
   path: platform-automation-tasks/tasks/update-runtime-config.sh
-# code_snippet update-runtime-config end
+
+
+
diff --git a/tasks/upgrade-opsman.sh b/tasks/_upgrade-opsman-script.html.erb old mode 100755 new mode 100644 similarity index 94% rename from tasks/upgrade-opsman.sh rename to tasks/_upgrade-opsman-script.html.erb index 7e8265d5..11b51ad0 --- a/tasks/upgrade-opsman.sh +++ b/tasks/_upgrade-opsman-script.html.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet upgrade-opsman-script start bash - +
+
 cat /var/version && echo ""
 om -v
 set -eux
@@ -52,4 +51,5 @@ om --env env/"${ENV_FILE}" configure-opsman \
   --config "config/${OPSMAN_CONFIG_FILE}" \
   ${vars_files_args[@]}
 
-# code_snippet upgrade-opsman-script end
+
+
diff --git a/tasks/upgrade-opsman.yml b/tasks/_upgrade-opsman.html.md.erb similarity index 84% rename from tasks/upgrade-opsman.yml rename to tasks/_upgrade-opsman.html.md.erb index e31aded2..45671ece 100644 --- a/tasks/upgrade-opsman.yml +++ b/tasks/_upgrade-opsman.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet upgrade-opsman start yaml +
+
 ---
 platform: linux
 
@@ -46,7 +42,7 @@ params:
 
   STATE_FILE: state.yml
   # - Required
-  # - Filepath of the state yaml file
+  # - Filepath of the state YAML file
   # - The path is relative to root of the `state` output
   # - if the filename includes "$timestamp",
   #   for example "state-$timestamp.yml",
@@ -65,4 +61,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/upgrade-opsman.sh
-# code_snippet upgrade-opsman end
+
+
+
diff --git a/tasks/upload-and-stage-product.sh b/tasks/_upload-and-stage-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 81% rename from tasks/upload-and-stage-product.sh rename to tasks/_upload-and-stage-product-script.html.md.erb index 7429b7b1..08128950 --- a/tasks/upload-and-stage-product.sh +++ b/tasks/_upload-and-stage-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet upload-and-stage-product-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -22,4 +21,6 @@ product_version="$(om product-metadata \
 om --env env/"${ENV_FILE}" stage-product \
   --product-name "${product_name}" \
   --product-version "${product_version}"
-# code_snippet upload-and-stage-product-script end
+
+
+
diff --git a/tasks/upload-and-stage-product.yml b/tasks/_upload-and-stage-product.html.md.erb similarity index 70% rename from tasks/upload-and-stage-product.yml rename to tasks/_upload-and-stage-product.html.md.erb index 7b3cc7bd..07a05680 100644 --- a/tasks/upload-and-stage-product.yml +++ b/tasks/_upload-and-stage-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet upload-and-stage-product start yaml +
+
 ---
 platform: linux
 
@@ -32,4 +28,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/upload-and-stage-product.sh
-# code_snippet upload-and-stage-product end
+
+
+
diff --git a/tasks/upload-product.sh b/tasks/_upload-product-script.html.md.erb old mode 100755 new mode 100644 similarity index 73% rename from tasks/upload-product.sh rename to tasks/_upload-product-script.html.md.erb index c369dd03..ae75b0f7 --- a/tasks/upload-product.sh +++ b/tasks/_upload-product-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet upload-product-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -12,4 +11,6 @@ fi
 om --env env/"${ENV_FILE}" upload-product \
   --product product/*.pivotal \
   ${OPTIONAL_CONFIG_FLAG}
-# code_snippet upload-product-script end
+
+
+
diff --git a/tasks/upload-product.yml b/tasks/_upload-product.html.md.erb similarity index 71% rename from tasks/upload-product.yml rename to tasks/_upload-product.html.md.erb index def27f23..bbfe16d3 100644 --- a/tasks/upload-product.yml +++ b/tasks/_upload-product.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet upload-product start yaml +
+
 ---
 platform: linux
 
@@ -31,4 +27,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/upload-product.sh
-# code_snippet upload-product end
+
+
+
diff --git a/tasks/upload-stemcell.sh b/tasks/_upload-stemcell-script.html.md.erb old mode 100755 new mode 100644 similarity index 75% rename from tasks/upload-stemcell.sh rename to tasks/_upload-stemcell-script.html.md.erb index 705aa9e9..6a0aa6aa --- a/tasks/upload-stemcell.sh +++ b/tasks/_upload-stemcell-script.html.md.erb @@ -1,6 +1,5 @@ -#!/usr/bin/env bash -# code_snippet upload-stemcell-script start bash - +
+
 cat /var/version && echo ""
 set -eux
 
@@ -13,4 +12,6 @@ om --env env/"${ENV_FILE}" upload-stemcell \
   --floating="${FLOATING_STEMCELL}" \
   --stemcell "${PWD}"/stemcell/*.tgz \
   ${OPTIONAL_CONFIG_FLAG}
-# code_snippet upload-stemcell-script end
+
+
+
diff --git a/tasks/upload-stemcell.yml b/tasks/_upload-stemcell.html.md.erb similarity index 72% rename from tasks/upload-stemcell.yml rename to tasks/_upload-stemcell.html.md.erb index d5e2500c..88744c18 100644 --- a/tasks/upload-stemcell.yml +++ b/tasks/_upload-stemcell.html.md.erb @@ -1,9 +1,5 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet upload-stemcell start yaml +
+
 ---
 platform: linux
 
@@ -12,7 +8,7 @@ inputs:
 - name: env # contains the env file with target OpsMan Information
 - name: stemcell # contains the stemcell tarball
 # - The stemcell filename is important and must be preserved.
-#   if using the bosh.io concourse resource,
+#   if using the bosh.io concourse resource,
 #   set `params.preserve_filename: true` on your GET.
 
 params:
@@ -37,4 +33,6 @@ params:
 
 run:
   path: platform-automation-tasks/tasks/upload-stemcell.sh
-# code_snippet upload-stemcell end
+
+
+
diff --git a/tasks/activate-certificate-authority.sh b/tasks/activate-certificate-authority.sh deleted file mode 100755 index 2ec765e6..00000000 --- a/tasks/activate-certificate-authority.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# code_snippet activate-certificate-authority-script start bash - -cat /var/version && echo "" -set -eux - -om --env env/"${ENV_FILE}" activate-certificate-authority -# code_snippet activate-certificate-authority-script end diff --git a/tasks/activate-certificate-authority.yml b/tasks/activate-certificate-authority.yml deleted file mode 100644 index c543da76..00000000 --- a/tasks/activate-certificate-authority.yml +++ /dev/null @@ -1,22 +0,0 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet activate-certificate-authority start yaml ---- -platform: linux - -inputs: -- name: platform-automation-tasks -- name: env # contains the env file with target OpsMan Information - -params: - ENV_FILE: env.yml - # - Required - # - Filepath of the env config YAML - # - The path is relative to root of the `env` input - -run: - path: platform-automation-tasks/tasks/activate-certificate-authority.sh -# code_snippet activate-certificate-authority end \ No newline at end of file diff --git a/tasks/delete-certificate-authority.sh b/tasks/delete-certificate-authority.sh deleted file mode 100755 index e02b0e7b..00000000 --- a/tasks/delete-certificate-authority.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# code_snippet delete-certificate-authority-script start bash - -cat /var/version && echo "" -set -eux - -om --env env/"${ENV_FILE}" delete-certificate-authority --all-inactive -# code_snippet delete-certificate-authority-script end diff --git a/tasks/delete-certificate-authority.yml 
b/tasks/delete-certificate-authority.yml deleted file mode 100644 index ff333c2c..00000000 --- a/tasks/delete-certificate-authority.yml +++ /dev/null @@ -1,21 +0,0 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet delete-certificate-authority start yaml ---- -platform: linux - -inputs: -- name: platform-automation-tasks -- name: env # contains the env file with target OpsMan Information - -params: - ENV_FILE: env.yml - # - Required - # - Filepath of the env config YAML - -run: - path: platform-automation-tasks/tasks/delete-certificate-authority.sh -# code_snippet delete-certificate-authority end \ No newline at end of file diff --git a/tasks/delete-installation.sh b/tasks/delete-installation.sh deleted file mode 100755 index 2caed47d..00000000 --- a/tasks/delete-installation.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# code_snippet delete-installation-script start bash - -cat /var/version && echo "" -set -eux -om --env env/"${ENV_FILE}" delete-installation --force -# code_snippet delete-installation-script end diff --git a/tasks/pre-deploy-check.sh b/tasks/pre-deploy-check.sh deleted file mode 100755 index d5102386..00000000 --- a/tasks/pre-deploy-check.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# code_snippet pre-deploy-check-script start bash - -cat /var/version && echo "" -set -eux - -om --env env/"${ENV_FILE}" pre-deploy-check -# code_snippet pre-deploy-check-script end diff --git a/tasks/regenerate-certificates.sh b/tasks/regenerate-certificates.sh deleted file mode 100755 index 99988ac6..00000000 --- a/tasks/regenerate-certificates.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# code_snippet regenerate-certificates-script start bash - -cat /var/version && echo "" -set -eux - -om --env env/"${ENV_FILE}" 
regenerate-certificates -# code_snippet regenerate-certificates-script end diff --git a/tasks/regenerate-certificates.yml b/tasks/regenerate-certificates.yml deleted file mode 100644 index e7b69377..00000000 --- a/tasks/regenerate-certificates.yml +++ /dev/null @@ -1,22 +0,0 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet regenerate-certificates start yaml ---- -platform: linux - -inputs: -- name: platform-automation-tasks -- name: env # contains the env file with target OpsMan Information - -params: - ENV_FILE: env.yml - # - Required - # - Filepath of the env config YAML - # - The path is relative to root of the `env` input - -run: - path: platform-automation-tasks/tasks/regenerate-certificates.sh -# code_snippet regenerate-certificates end \ No newline at end of file diff --git a/tasks/revert-staged-changes.sh b/tasks/revert-staged-changes.sh deleted file mode 100755 index 9b3b7b75..00000000 --- a/tasks/revert-staged-changes.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# code_snippet revert-staged-changes-script start bash - -cat /var/version && echo "" -set -eux - -om --env env/"${ENV_FILE}" revert-staged-changes -# code_snippet revert-staged-changes-script end diff --git a/tasks/revert-staged-changes.yml b/tasks/revert-staged-changes.yml deleted file mode 100644 index 44c9c706..00000000 --- a/tasks/revert-staged-changes.yml +++ /dev/null @@ -1,22 +0,0 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. 
- -# code_snippet revert-staged-changes start yaml ---- -platform: linux - -inputs: -- name: platform-automation-tasks -- name: env # contains the env file with target OpsMan Information - -params: - ENV_FILE: env.yml - # - Required - # - Filepath of the env config YAML - # - The path is relative to root of the `env` input - -run: - path: platform-automation-tasks/tasks/revert-staged-changes.sh -# code_snippet revert-staged-changes end diff --git a/tasks/send-telemetry.yml b/tasks/send-telemetry.yml deleted file mode 100644 index 17447501..00000000 --- a/tasks/send-telemetry.yml +++ /dev/null @@ -1,25 +0,0 @@ -# The inputs, outputs, params, filename, and filepath -# of this task file are part of its semantically versioned API. -# See our documentation for a detailed discussion of our semver API. -# See www.semver.org for an explanation of semantic versioning. - -# code_snippet send-telemetry start yaml ---- -platform: linux - -inputs: -- name: platform-automation-tasks -- name: telemetry-collector-binary -- name: collected-telemetry-data - -params: - API_KEY: - # required - # The API key provided by Pivotal after accepting the EULA - - DATA_FILE_PATH: - # required - -run: - path: platform-automation-tasks/tasks/send-telemetry.sh -# code_snippet send-telemetry end diff --git a/tasks/test.sh b/tasks/test.sh index 482aa5d9..6b15c2d0 100755 --- a/tasks/test.sh +++ b/tasks/test.sh @@ -11,4 +11,4 @@ set -eux om vm-lifecycle --help om --help { echo "Successfully validated tasks and image!"; } 2>/dev/null -# code_snippet test-script end +# code_snippet test-script end \ No newline at end of file diff --git a/tasks/test.yml b/tasks/test.yml index 268803c8..364918f4 100644 --- a/tasks/test.yml +++ b/tasks/test.yml @@ -8,8 +8,8 @@ platform: linux inputs: -- name: platform-automation-tasks + - name: platform-automation-tasks run: path: platform-automation-tasks/tasks/test.sh -# code_snippet test end +# code_snippet test end \ No newline at end of file