diff --git a/frontend/packages/dev-console/integration-tests/features/addFlow/create-from-devfile.feature b/frontend/packages/dev-console/integration-tests/features/addFlow/create-from-devfile.feature index 2123f003fb0..2fbaf418808 100644 --- a/frontend/packages/dev-console/integration-tests/features/addFlow/create-from-devfile.feature +++ b/frontend/packages/dev-console/integration-tests/features/addFlow/create-from-devfile.feature @@ -14,7 +14,7 @@ Feature: Create Application from Devfile Given user is at the Topology page When user right clicks on topology empty graph And user selects "Import from Git" option from Add to Project context menu - And user enters Git Repo URL as "https://github.com/redhat-developer/devfile-sample" in Import from Git form + And user enters Git Repo URL as "https://github.com/nodeshift-starters/devfile-sample" in Import from Git form And user enters workload name as "node-bulletin-board-1" And user clicks Create button on Add page Then user will be redirected to Topology page diff --git a/frontend/packages/dev-console/integration-tests/features/e2e/add-flow-ci.feature b/frontend/packages/dev-console/integration-tests/features/e2e/add-flow-ci.feature index bf35d53f701..21f594ed25d 100644 --- a/frontend/packages/dev-console/integration-tests/features/e2e/add-flow-ci.feature +++ b/frontend/packages/dev-console/integration-tests/features/e2e/add-flow-ci.feature @@ -96,7 +96,7 @@ Feature: Create the different workloads from Add page Given user is at the Topology page When user right clicks on topology empty graph And user selects "Import from Git" option from Add to Project context menu - And user enters Git Repo URL as "https://github.com/redhat-developer/devfile-sample" in Import from Git form + And user enters Git Repo URL as "https://github.com/nodeshift-starters/devfile-sample" in Import from Git form And user enters workload name as "node-bulletin-board-1" And user clicks Create button on Add page Then user will be redirected to 
Topology page diff --git a/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.data.ts b/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.data.ts index bef898cab58..76d99493564 100644 --- a/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.data.ts +++ b/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.data.ts @@ -179,16 +179,16 @@ export const loadedCatalogService: CatalogService = { { uid: 'nodejs-basic', type: 'Sample', - name: 'Basic NodeJS', - description: 'A simple Hello world NodeJS application', + name: 'Basic Node.js', + description: 'A simple Hello World Node.js application', tags: ['NodeJS', 'Express'], cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/redhat-developer/devfile-sample.git', + '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/nodeshift-starters/devfile-sample.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://nodejs.org/static/images/logos/nodejs-new-pantone-black.svg', }, }, { @@ -200,10 +200,10 @@ export const loadedCatalogService: CatalogService = { cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=python-basic&gitRepo=https://github.com/elsony/devfile-sample-python-basic.git', + '/import?importType=devfile&formType=sample&devfileName=python-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-python-basic.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://www.python.org/static/community_logos/python-logo-generic.svg', }, }, { @@ -215,10 +215,10 @@ export const loadedCatalogService: CatalogService = { cta: { label: 'Create Devfile Sample', href: - 
'/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/elsony/devfile-sample-code-with-quarkus.git', + '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/devfile-samples/devfile-sample-code-with-quarkus.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://design.jboss.org/quarkus/logo/final/SVG/quarkus_icon_rgb_default.svg', }, }, { @@ -230,10 +230,10 @@ export const loadedCatalogService: CatalogService = { cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/elsony/devfile-sample-java-springboot-basic.git', + '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-java-springboot-basic.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://spring.io/images/projects/spring-edf462fec682b9d48cf628eaf9e19521.svg', }, }, { @@ -411,16 +411,16 @@ export const loadedCatalogService: CatalogService = { { uid: 'nodejs-basic', type: 'Sample', - name: 'Basic NodeJS', - description: 'A simple Hello world NodeJS application', + name: 'Basic Node.js ', + description: 'A simple Hello World Node.js application', tags: ['NodeJS', 'Express'], cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/redhat-developer/devfile-sample.git', + '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/nodeshift-starters/devfile-sample.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://nodejs.org/static/images/logos/nodejs-new-pantone-black.svg', }, }, { @@ -432,10 +432,10 @@ export const loadedCatalogService: CatalogService = { cta: { label: 'Create Devfile Sample', href: - 
'/import?importType=devfile&formType=sample&devfileName=python-basic&gitRepo=https://github.com/elsony/devfile-sample-python-basic.git', + '/import?importType=devfile&formType=sample&devfileName=python-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-python-basic.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://www.python.org/static/community_logos/python-logo-generic.svg', }, }, { @@ -447,10 +447,10 @@ export const loadedCatalogService: CatalogService = { cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/elsony/devfile-sample-code-with-quarkus.git', + '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/devfile-samples/devfile-sample-code-with-quarkus.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://design.jboss.org/quarkus/logo/final/SVG/quarkus_icon_rgb_default.svg', }, }, { @@ -462,10 +462,10 @@ export const loadedCatalogService: CatalogService = { cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/elsony/devfile-sample-java-springboot-basic.git', + '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-java-springboot-basic.git', }, icon: { - url: 'data:image/png;base64,.....', + url: 'https://spring.io/images/projects/spring-edf462fec682b9d48cf628eaf9e19521.svg', }, }, { diff --git a/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.spec.tsx b/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.spec.tsx index b62e3c16ccd..8f4e469f323 100644 --- a/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.spec.tsx +++ 
b/frontend/packages/dev-console/src/components/add/__tests__/SampleGettingStartedCard.spec.tsx @@ -84,13 +84,13 @@ describe('SampleGettingStartedCard', () => { id: 'code-with-quarkus', title: 'Basic Quarkus', href: - '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/elsony/devfile-sample-code-with-quarkus.git', + '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/devfile-samples/devfile-sample-code-with-quarkus.git', }, { id: 'java-springboot-basic', title: 'Basic Spring Boot', href: - '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/elsony/devfile-sample-java-springboot-basic.git', + '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-java-springboot-basic.git', }, ]); expect(wrapper.find(GettingStartedCard).props().moreLink).toEqual({ @@ -117,9 +117,9 @@ describe('SampleGettingStartedCard', () => { }, { id: 'nodejs-basic', - title: 'Basic NodeJS', + title: 'Basic Node.js', href: - '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/redhat-developer/devfile-sample.git', + '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/nodeshift-starters/devfile-sample.git', }, ]); expect(wrapper.find(GettingStartedCard).props().moreLink).toEqual({ diff --git a/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.data.ts b/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.data.ts index a3252cbce2f..e7722b53ba8 100644 --- a/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.data.ts +++ b/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.data.ts @@ -4,15 +4,15 @@ import { DevfileSample } from 
'../../../import/devfile/devfile-types'; export const devfileSamples: DevfileSample[] = [ { name: 'nodejs-basic', - displayName: 'Basic NodeJS', - description: 'A simple Hello world NodeJS application', + displayName: 'Basic Node.js', + description: 'A simple Hello World Node.js application', icon: 'trimmed', tags: ['NodeJS', 'Express'], projectType: 'nodejs', language: 'nodejs', git: { remotes: { - origin: 'https://github.com/redhat-developer/devfile-sample.git', + origin: 'https://github.com/nodeshift-starters/devfile-sample.git', }, }, }, @@ -26,7 +26,7 @@ export const devfileSamples: DevfileSample[] = [ language: 'java', git: { remotes: { - origin: 'https://github.com/elsony/devfile-sample-code-with-quarkus.git', + origin: 'https://github.com/devfile-samples/devfile-sample-code-with-quarkus.git', }, }, }, @@ -40,7 +40,7 @@ export const devfileSamples: DevfileSample[] = [ language: 'java', git: { remotes: { - origin: 'https://github.com/elsony/devfile-sample-java-springboot-basic.git', + origin: 'https://github.com/devfile-samples/devfile-sample-java-springboot-basic.git', }, }, }, @@ -54,7 +54,7 @@ export const devfileSamples: DevfileSample[] = [ language: 'python', git: { remotes: { - origin: 'https://github.com/elsony/devfile-sample-python-basic.git', + origin: 'https://github.com/devfile-samples/devfile-sample-python-basic.git', }, }, }, @@ -64,15 +64,15 @@ export const expectedCatalogItems: CatalogItem[] = [ { uid: 'nodejs-basic', type: 'Sample', - name: 'Basic NodeJS', - description: 'A simple Hello world NodeJS application', + name: 'Basic Node.js', + description: 'A simple Hello World Node.js application', tags: ['NodeJS', 'Express'], cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/redhat-developer/devfile-sample.git', + '/import?importType=devfile&formType=sample&devfileName=nodejs-basic&gitRepo=https://github.com/nodeshift-starters/devfile-sample.git', }, - 
icon: { url: 'data:image/png;base64,trimmed' }, + icon: { url: 'trimmed' }, }, { uid: 'code-with-quarkus', @@ -83,9 +83,9 @@ export const expectedCatalogItems: CatalogItem[] = [ cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/elsony/devfile-sample-code-with-quarkus.git', + '/import?importType=devfile&formType=sample&devfileName=code-with-quarkus&gitRepo=https://github.com/devfile-samples/devfile-sample-code-with-quarkus.git', }, - icon: { url: 'data:image/png;base64,trimmed' }, + icon: { url: 'trimmed' }, }, { uid: 'java-springboot-basic', @@ -96,9 +96,9 @@ export const expectedCatalogItems: CatalogItem[] = [ cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/elsony/devfile-sample-java-springboot-basic.git', + '/import?importType=devfile&formType=sample&devfileName=java-springboot-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-java-springboot-basic.git', }, - icon: { url: 'data:image/png;base64,trimmed' }, + icon: { url: 'trimmed' }, }, { uid: 'python-basic', @@ -109,8 +109,8 @@ export const expectedCatalogItems: CatalogItem[] = [ cta: { label: 'Create Devfile Sample', href: - '/import?importType=devfile&formType=sample&devfileName=python-basic&gitRepo=https://github.com/elsony/devfile-sample-python-basic.git', + '/import?importType=devfile&formType=sample&devfileName=python-basic&gitRepo=https://github.com/devfile-samples/devfile-sample-python-basic.git', }, - icon: { url: 'data:image/png;base64,trimmed' }, + icon: { url: 'trimmed' }, }, ]; diff --git a/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.spec.ts b/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.spec.ts index 6297dc99793..68f5fed7d68 100644 --- 
a/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.spec.ts +++ b/frontend/packages/dev-console/src/components/catalog/providers/__tests__/useDevfileSamples.spec.ts @@ -23,7 +23,9 @@ describe('useDevfileSamples:', () => { const { result } = testHook(() => useDevfileSamples({})); expect(getMock).toHaveBeenCalledTimes(1); - expect(getMock).toHaveBeenLastCalledWith('/api/devfile/samples?registry=sample-placeholder'); + expect(getMock).toHaveBeenLastCalledWith( + '/api/devfile/samples?registry=https://registry.devfile.io', + ); expect(result.current).toEqual([[], false, undefined]); @@ -39,7 +41,9 @@ describe('useDevfileSamples:', () => { const { result } = testHook(() => useDevfileSamples({})); expect(getMock).toHaveBeenCalledTimes(1); - expect(getMock).toHaveBeenLastCalledWith('/api/devfile/samples?registry=sample-placeholder'); + expect(getMock).toHaveBeenLastCalledWith( + '/api/devfile/samples?registry=https://registry.devfile.io', + ); expect(result.current).toEqual([[], false, undefined]); diff --git a/frontend/packages/dev-console/src/components/catalog/providers/useDevfile.tsx b/frontend/packages/dev-console/src/components/catalog/providers/useDevfile.tsx index e3eb496c617..8daa2353ce7 100644 --- a/frontend/packages/dev-console/src/components/catalog/providers/useDevfile.tsx +++ b/frontend/packages/dev-console/src/components/catalog/providers/useDevfile.tsx @@ -11,7 +11,6 @@ const normalizeDevfile = (devfileSamples: DevfileSample[], t: TFunction): Catalo const normalizedDevfileSamples = devfileSamples?.map((sample) => { const { name: uid, displayName, description, tags, git, icon } = sample; const gitRepoUrl = Object.values(git.remotes)[0]; - const iconUrl = icon ? 
`data:image/png;base64,${icon}` : ''; const href = `/import?importType=devfile&devfileName=${uid}&gitRepo=${gitRepoUrl}`; const createLabel = t('devconsole~Create Application'); const type = 'Devfile'; @@ -41,7 +40,7 @@ const normalizeDevfile = (devfileSamples: DevfileSample[], t: TFunction): Catalo label: createLabel, href, }, - icon: { url: iconUrl }, + icon: { url: icon }, details: { properties: detailsProperties, descriptions: detailsDescriptions, @@ -61,7 +60,7 @@ const useDevfile: ExtensionHook = (): [CatalogItem[], boolean, an React.useEffect(() => { let mounted = true; - coFetchJSON('/api/devfile/samples?registry=sample-placeholder') + coFetchJSON('/api/devfile/samples?registry=https://registry.devfile.io') .then((resp) => { if (mounted) setDevfileSamples(resp); }) diff --git a/frontend/packages/dev-console/src/components/catalog/providers/useDevfileSamples.tsx b/frontend/packages/dev-console/src/components/catalog/providers/useDevfileSamples.tsx index 813eec25de3..ac2c03571b4 100644 --- a/frontend/packages/dev-console/src/components/catalog/providers/useDevfileSamples.tsx +++ b/frontend/packages/dev-console/src/components/catalog/providers/useDevfileSamples.tsx @@ -12,7 +12,7 @@ const normalizeDevfileSamples = (devfileSamples: DevfileSample[], t: TFunction): const gitRepoUrl = Object.values(git.remotes)[0]; const label = t('devconsole~Create Devfile Sample'); const href = `/import?importType=devfile&formType=sample&devfileName=${uid}&gitRepo=${gitRepoUrl}`; - const iconUrl = icon ? 
`data:image/png;base64,${icon}` : ''; + const iconUrl = icon || ''; const item: CatalogItem = { uid, @@ -40,8 +40,7 @@ const useDevfileSamples: ExtensionHook = (): [CatalogItem[], bool React.useEffect(() => { let mounted = true; - - coFetchJSON('/api/devfile/samples?registry=sample-placeholder') + coFetchJSON('/api/devfile/samples?registry=https://registry.devfile.io') .then((res) => { if (mounted) setDevfileSamples(res); }) diff --git a/frontend/packages/dev-console/src/components/import/devfile/DevfileInfo.tsx b/frontend/packages/dev-console/src/components/import/devfile/DevfileInfo.tsx index 5060af8a70a..549267a3a1f 100644 --- a/frontend/packages/dev-console/src/components/import/devfile/DevfileInfo.tsx +++ b/frontend/packages/dev-console/src/components/import/devfile/DevfileInfo.tsx @@ -12,11 +12,7 @@ export type DevfileInfoProps = { const DevfileInfo: React.FC = ({ devfileSample }) => { const { t } = useTranslation(); const { icon, iconClass, displayName, description, git, tags } = devfileSample; - const iconUrl = iconClass - ? getImageForIconClass(iconClass) - : icon - ? `data:image/png;base64,${icon}` - : ''; + const iconUrl = iconClass ? getImageForIconClass(iconClass) : icon || ''; const sampleRepo = git?.remotes ? 
Object.values(git.remotes)[0] : ''; return ( diff --git a/frontend/packages/dev-console/src/components/import/devfile/devfileHooks.ts b/frontend/packages/dev-console/src/components/import/devfile/devfileHooks.ts index 66e6d639163..e879365dd84 100644 --- a/frontend/packages/dev-console/src/components/import/devfile/devfileHooks.ts +++ b/frontend/packages/dev-console/src/components/import/devfile/devfileHooks.ts @@ -139,7 +139,7 @@ export const useSelectedDevfileSample = () => { React.useEffect(() => { let mounted = true; const payload = { - registry: 'sample-placeholder', + registry: 'https://registry.devfile.io', }; coFetchJSON .put('/api/devfile/samples', payload) diff --git a/go.mod b/go.mod index 6869a0a1913..440ef7e9341 100644 --- a/go.mod +++ b/go.mod @@ -5,9 +5,11 @@ go 1.16 require ( github.com/coreos/go-oidc v2.1.0+incompatible github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f - github.com/devfile/api/v2 v2.0.0-20210211160219-33a78aec06af - github.com/devfile/library v1.0.0-alpha.2 - github.com/devfile/registry-support/index/generator v0.0.0-20210505173027-d06fe2bb3ee8 + github.com/devfile/api/v2 v2.0.0-20211021164004-dabee4e633ed + github.com/devfile/library v1.2.1-0.20211104222135-49d635cb492f + github.com/devfile/registry-support/index/generator v0.0.0-20211012185733-0a73f866043f + github.com/devfile/registry-support/registry-library v0.0.0-20211026200306-cab748834109 + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/gorilla/websocket v1.4.2 github.com/graph-gophers/graphql-go v0.0.0-20200309224638-dae41bde9ef9 github.com/openshift/api v0.0.0-20211103080632-8981c8822dfa @@ -22,13 +24,13 @@ require ( gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.6.2 k8s.io/api v0.22.1 - k8s.io/apiextensions-apiserver v0.21.1 + k8s.io/apiextensions-apiserver v0.21.3 k8s.io/apimachinery v0.22.1 k8s.io/cli-runtime v0.21.0 k8s.io/client-go v0.22.1 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.9.0 - sigs.k8s.io/controller-runtime v0.9.0 + 
sigs.k8s.io/controller-runtime v0.9.5 sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index ebeaac4f1e4..4c3b94dc7f6 100644 --- a/go.sum +++ b/go.sum @@ -114,6 +114,7 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -235,12 +236,16 @@ github.com/deislabs/oras v0.11.1/go.mod h1:39lCtf8Q6WDC7ul9cnyWXONNzKvabEKk+AX+L github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/devfile/api/v2 v2.0.0-20210211160219-33a78aec06af h1:egbFAAS/CWJMAqa4zIm8Cik3+iCqTAfLPfCj6PLEG5Y= -github.com/devfile/api/v2 v2.0.0-20210211160219-33a78aec06af/go.mod h1:Cot4snybn3qhIh48oIFi9McocnIx7zY5fFbjfrIpPvg= -github.com/devfile/library v1.0.0-alpha.2 h1:LN3KOQnUtP/NXl9CLYW7gQgH6gPpCSrEH2mmhxvrpgc= -github.com/devfile/library v1.0.0-alpha.2/go.mod h1:aGJSpcGrRiYwsQQJMQH1ChHuOptUf49n+j0RDBYyTIQ= -github.com/devfile/registry-support/index/generator v0.0.0-20210505173027-d06fe2bb3ee8 h1:BWDxSgyFihqnox1uRNKT+qerjuwIEHNOohHOTg17FM0= 
-github.com/devfile/registry-support/index/generator v0.0.0-20210505173027-d06fe2bb3ee8/go.mod h1:02/+7NmfHFJQ0C0S7akdfCeJSQSWmOvep8MmYcsSFko= +github.com/devfile/api/v2 v2.0.0-20210910153124-da620cd1a7a1/go.mod h1:kLX/nW93gigOHXK3NLeJL2fSS/sgEe+OHu8bo3aoOi4= +github.com/devfile/api/v2 v2.0.0-20211021164004-dabee4e633ed h1:OXF9l+MlJrirXAqKN6EZUVaHB0FKm7nh0EjpktwnBig= +github.com/devfile/api/v2 v2.0.0-20211021164004-dabee4e633ed/go.mod h1:d99eTN6QxgzihOOFyOZA+VpUyD4Q1pYRYHZ/ci9J96Q= +github.com/devfile/library v1.1.1-0.20210910214722-7c5ff63711ec/go.mod h1:svPWwWb+BP15SXCHl0dyOeE4Sohrjl5a2BaOzc/riLc= +github.com/devfile/library v1.2.1-0.20211104222135-49d635cb492f h1:kKsBWkFiD7tSIpzwfmz7TH89U1U3yRxSJ9UPOo2OH1s= +github.com/devfile/library v1.2.1-0.20211104222135-49d635cb492f/go.mod h1:uFZZdTuRqA68FVe/JoJHP92CgINyQkyWnM2Qyiim+50= +github.com/devfile/registry-support/index/generator v0.0.0-20211012185733-0a73f866043f h1:fKNUmoOPh7yAs69uMRZWHvev+m3e7T4jBL/hOXZB9ys= +github.com/devfile/registry-support/index/generator v0.0.0-20211012185733-0a73f866043f/go.mod h1:bLGagbW2SFn7jo5+kUPlCMehIGqWkRtLKc5O0OyJMJM= +github.com/devfile/registry-support/registry-library v0.0.0-20211026200306-cab748834109 h1:JIonpmUOzZbjB5z2C6K0UYcbo6IAQnLEGZDr7j2x4ok= +github.com/devfile/registry-support/registry-library v0.0.0-20211026200306-cab748834109/go.mod h1:P9ivpg3NdJR7/atzW6Jo7Qs0NbrLjwxoknhQN+lFyAc= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -458,6 +463,8 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -576,8 +583,9 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -603,7 +611,6 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= @@ -676,6 +683,7 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lucasjones/reggen v0.0.0-20200904144131-37ba4fa293bb/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -794,7 +802,6 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -806,8 +813,8 @@ 
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= -github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -934,7 +941,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= @@ -1114,8 +1120,8 @@ go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1316,10 +1322,12 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod 
h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -1555,12 +1563,11 @@ k8s.io/api v0.16.7/go.mod h1:oUAiGRgo4t+5yqcxjOu5LoHT3wJ8JSbgczkaFYS5L7I= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= @@ -1568,24 +1575,22 @@ k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzw k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= k8s.io/apiextensions-apiserver v0.20.2/go.mod 
h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs= k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= -k8s.io/apiextensions-apiserver v0.21.1 h1:AA+cnsb6w7SZ1vD32Z+zdgfXdXY8X9uGX5bN6EoPEIo= -k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= +k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY= +k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= k8s.io/apimachinery v0.16.7/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= @@ -1593,12 +1598,11 @@ k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw= 
k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= -k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= -k8s.io/apiserver v0.21.1 h1:wTRcid53IhxhbFt4KTrFSw8tAncfr01EP91lzfcygVg= -k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/apiserver v0.21.3 h1:QxAgE1ZPQG5cPlHScHTnLxP9H/kU3zjH1Vnd8G+n5OI= +k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= @@ -1608,11 +1612,10 @@ k8s.io/client-go v0.16.7/go.mod h1:9kEMEeuy2LdsHHXoU2Skqh+SDso+Yhkxd/0tltvswDE= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/client-go v0.22.1 
h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= @@ -1620,24 +1623,22 @@ k8s.io/code-generator v0.16.7/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOL k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.16.7/go.mod h1:ikdyfezOFMu5O0qJjy/Y9eXwj+fV3pVwdmt0ulVcIR0= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= -k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0= 
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= -k8s.io/component-base v0.21.1 h1:iLpj2btXbR326s/xNQWmPNGu0gaYSjzn7IN/5i28nQw= -k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/component-base v0.21.3 h1:4WuuXY3Npa+iFfi2aDRiOz+anhNvRfye0859ZgfC5Og= +k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1662,7 +1663,6 @@ k8s.io/kube-aggregator v0.18.2/go.mod h1:ijq6FnNUoKinA6kKbkN6svdTacSoQVNtKqmQ1+X k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= @@ -1679,12 +1679,11 @@ k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= 
-k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -1698,12 +1697,12 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= -sigs.k8s.io/controller-runtime v0.6.3/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= sigs.k8s.io/controller-runtime v0.8.2/go.mod 
h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU= -sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.5 h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapTNfxc= +sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= sigs.k8s.io/kind v0.7.0/go.mod h1:An/AbWHT6pA/Lm0Og8j3ukGhfJP3RiVN/IBU6Lo3zl8= diff --git a/pkg/devfile/sample-placeholder.go b/pkg/devfile/sample-placeholder.go deleted file mode 100644 index 5e4d375b46c..00000000000 --- a/pkg/devfile/sample-placeholder.go +++ /dev/null @@ -1,76 +0,0 @@ -package devfile - -const SamplePlaceholderJSON = `[ - { - "name": "nodejs-basic", - "displayName": "Basic NodeJS", - "description": "A simple Hello world NodeJS application", - "icon": 
"UklGRloqAABXRUJQVlA4TE0qAAAv/8A/ECrN7/8vd7Ur//8/dmx/v33cnwf5U+7n7r3W1aurV3E+uLvLRbNwuybduA2dUdPRkxzt6BqskyykwV2aw+ycM8L1l5VsdGFXB8W9uNycUFw3Dvezko1TnI3LzBlpk5uVnFFxd0auDe7QMHItcvzCmjPE3Sk6Orqxqe4Et/sUdxi6XJPiTnF3h40Uu5MOcXe3Pdq4LNxGZ9ZZcXd3W9jsaDc+0uLeLNxd96CkuHtxd3c29iRrdDUnC/cmZ4gXd5ixkkXyRZpwJ2XmMjoyJFnIhR5dScNZycK9OD2PBEmS5LZ5+YGtHCwAHmE/GUIbSZLkJJX/Klw1cxJo2zbtZtz37rdi27aT2o5ts7Zt27bb2LZtmz8/zteFRdtWlWqdHmR6tZvBYXNQ/MW1wWtcgIITeH0oGAUyWHMMCScAANQFIxEZ0o4WAgBmKKCZZQYAlyLFh+s2E8+pJLUxHZQ6UibpBRPHmAKgwptTuDAGoM4AlgtXAmf2DPeis30Onek5dPpr0Wn3oujprkVnvBed9X3ozK9DhICqkrSfIg6w8GYzv1kASNu5KroTpxqI4VNxsxw+ScAIKAISn8Uj8Rh0+nuRzYxwHaxbfwTwCpCwZhEuHOAYUv2LVbn4VDyOkErICB0lopQoQjjMxTvNCZ3lfYirgdgFTI40ezA4wCV+RRyeyRJsBFzCQMlxTClPOAKCFzvyI0iGzd46OvsBJpoybEvSEU9JSFNOICw+Fd+sUN1iRIGZ5gsqwMEBZtPlmK05VxRJKGFNsYRi5cTmDHLCgSECa75WQtIl/TzdXlQd6JPAFU1mUYTiDJxmQyZXCddqoHe0eUqxREjXuJZMKsFGvZxWq/lKljLVVuIjodoMrWN9GaAKo+q360D5QCwlDTDlirux7yyvQ8ws1SkqCkhgqDmBA9YAzjqqEjPK4bri+tJAU4GoZM1+SBwPYGak2aAXoNLk+nKavajK8LgEwhGGSDVwho7yMc4oJ43LHCDBZmMlXAzmImUhRQim65W1+pirZGM44TIPCSlg0vhZ5mAAE8DpsesGGnC1ZLeWJapj1tKyN1qeeANiM2Nccee/ACzvN3Z2rQFUK0kFiWh+SpSloZwNZIzCP/UJRWXAFzeQcOYocXe3pqiquDYBaxPG7mSTVEnvC2d8FlUGxVKaUm6uNcGa4uqVM70OjWXF1bBdypeOgWqJ84OjBbQZtpMNUKikM6S5lCLxlfhKS+Zq5cTmTLL2s8Q4BJIVKbFaSi6J5Wm5ScuYyVm4IIIBGPJCLNAGcLwpHXYt54traXKyh48mhOsplywAgMqZaLFr5UCU0rJOp7xoMz2MP9A7b7isAYQtV/bKnnKHeNpSS/lSN5FGsgTW9mJ5FygkznEOp+hm01oSSslSOZpg5VLZAVzQWFkAGEXmVW1lrkWUFz4V+OJmasU/z7gDvQeP65dwlCnvCt35WU3rsJSMEfyVGuclR8FQZSkqbB47IDHHmOmik/REaVqJm1HFU66BIWO+K/ReyqxiisxfnlTiBVo2VGVUkcPmOvSHcHDZGJnYAqjcSC3FWk6JCjQslZaW8ZUa12uKErAaJo8eAogwrgFbkoGmPNoXJ1moL0vNG+NSVBZUad2lfJ8EliaXeozCiyZIOSQ7YGtEixJ9t5ibA+bGnPiuhkS6nizF+k9lCKwZoKfBkWM9lcT4uFha5ggkSNZsTifHF+id0Fo9uVek+GosdxwvRZMslTtWltoytNr1P0BdETgHKZNFybhapiol6qif4swR5xga0KW5ejoJfEuqxuKpB8q7omWiJUHlOLLUkFFBAmsA4UjVUCyUHQkc0VLHmK10n+aEiE8c9QaWH+vzoCMC1xHWdDHYsERZDZGl2iYMCQICjAmTWI1llsTTthI3E/2lXEWdwMKuhtfRGcXX1rPSPihuoFmWYj5yqRrSOhYAKini/tgv3RgW8ZSWlJ+M21x9XMEAg3XqU09
nOVw/M8rFDSTwtMlSToo1a0Cy1NAQaAYn7aEaK0pOk/dsKZGjS+ZVUV2hYumWZ86vA/eR1GfHc5ZYlpa822qvL0utjhiIp7EAUG3jqkm1DgQSBKLlvjRQjTI/WDrAjK6lF4lFXONaNo9mRZPj5sms0chSawDHgPUvdiQ5pUlWU0r8cqtJVJA21nOy9bRJgKPP/IvFKkviKs2yFOUhDSmDkKUmgT/HuXXAXNoQSYef6qUlnRSba5gUDfQv616ILQCkHUx9sZBUEqSNTJbqXAeYKMqQLWVLAk2VRhnvMDNUt1nijbSDsLAunGrAj0iZKjo5T3ylXZZq6gEbo23AzWMqzFaC64myNC011h6Lcf1/U0BbI+7ShDzxsrYyTgjE1y5LcX5ytIG2Ju1kk3SIH3uqSW0NaZYlbr6QImo5249dSeN+MsaOyEtEd2tzRH2RliWTa5HrEsNosrLUFsAlQrp6VebshrbCIRAnKTYzzuylc3HsXG9k4bYN+J5Mrx1vdyTQJEulk01XllrfIQzgCn0QdryiNO0SbiBz2YTL9YgiDfRGhHrJMClXrsocaxGxtIkY+aYpS420AZy1rGIzmnG1KsZTuViVksJlFjFSURtApUv8jh1pH5TANQpZagugaHC+/PLuJJVAq6SqMoUiVzOVCzBzMGKaLev7dYWcL4FrBLLUGsBiMBeI+VTHdKBpJY44GadIYX8ZMkTMR9S/oWsbfA7O2sc1WfQqrvheU5eluraB78l024tlT1tjiiNzw4n4N4zEn8EC8LhZzxYT2XEJlDRlWWpiAXjhRH4impsSFWjzBTKlFVIWYxOhvsAMwLL5I4lo3pOgCctSawCXCte7wkp+TOu3r9prNlc7lxewFaH7/3wvQOVLdNuLOSWB1TRlqf3AHWVOc80Wg2xalK/F0/DGnSJplnWKGyrq1oJ+RKxtAwydc5GzaE5lRVTQMFkqghvSuDrsWrurtRV1KrNSoLxnqQKrEV/isbQ+Y6maX2qCstQqwLHlyloptc9K2tK0ktlcNEEUECuA3oNNIYSHtZlTnSokAwmcpiRLLQAsReblYmJ4Q6uo6FUWbU4PJxDY3kYTsO0hsMW4dl9EUXJpCVRTkaX6J0CQiGNfpOgPaMrz00rS0x2pZ8V1kYsJzDShSB5GmvNYEtNHxVHSFGSpg8c2pDEtxZ6cttp14MpYvhql+kqlBfQ2MY37sonMlT25MVFNQJaaAbgsuNKqpcqg+JYmtz9IHjaZMmIdsHawiVV8JgGKF1WHvZj0RVmRLUvNAI+b9UQS2XFxNHzFnhK3HLc5g1zhAIM1wap/P68B4BWNOfdoUkvToikRUpZq5Ajergl8J86BY/x9yTsSaPpwMk0IzAPEDR6iQG+Tberg0uD6Gq3mOsQNtMtSXNpAWyNP5Thy6lOFsivK0Sarlaslrj8s/aYcKH9wCyBtYFWYrZcWiKVRlpqtJ0vRCuxvRDeTyohJqsYq2sJBAiVBuTXFVcW1BVho0u0fy/04498X4IoljdpSDsQNNMtSJD+gt9F2a8qZOXOjRTTVri3xymI//g9w6jUAy9sG0IGNJEU8TCc2NrRps6645WiCE8+RbpzbAt4GVYYtA2lRnpaiXaaHzSxzj0rSINo/2wAqcyatu5Tp05SwlGRzRdIAYYq1xvg0JCWq1iKLO6WtiuHJVOYYWY1JYEyMo4cMCWwBjAfTbtdynqhAy80jcWMUDsVU/+QQWNKkjgRpWURb4FB6oJBiNXA5Gkvr//LXRl0ozBliLjU4rSmP8Hw5b4wyQJFBv851TJz6DL9Mk+FKWmkq0NulSDX5Z08liNFtw+nHSqgzn6PV9iUtToinhMItEkm6q+rgenC2z6E/KcrRkt9uDCeE8pQkZ0Sdw0Z6Aco6qjKztaylDLYCt93kCtTX/1gFZX10n/KVhvx2KV/qpkrnWBpV7NN6F3DHOSO2VDT4iEpa9jxSDgX6Gqm8Oizh7urYpWpFrlYuD2Bhp4H1b+F
IcO6NJbLj4W9pK2feRz2f2vB7V+aUG/bvuMmgyMWhOk4hY8LQgsBZEKCYUKVUS7mwrqJypldI2dDViKyVcVHh8tuN8ZUa8/r3AasG2NGDy5XVWqxVAvFV6Fuca4nQwIRu2w5C70lJOgij5vSVowmqXCp7YGvCAHv47QUbMtfJv3mW16HZuXRIV1ypuRUmGKt6QYKg7E7lQ67LF8nUbKaLyw+YPGiYIf8sFdKH1mo2LSEDVnZvkh7qxzI4H4oDEvLutySYedI5ggYMGWqXD641r69bQuac7vQKKw36WR3Yd41l0yFvXpX0n1Ez2qNCbO8HuI7XQr6tOkqkJhJBPygUSv/hKSvUfbpKegVQwV0wWKNA7DgWo4wXpiSEDbayhllU/WDRWezCoIRikbRIkjHmHqD3lGuyMCuhngoM+wT9uFEEpyf00oXzvBrAiKlrmCxOjxUKp4eKQ8XQjxMYTkGF2WIqZhNS0vYPMfhJD3Oray8dfwAAHb9g51DnzMWdtDo0urOR+BE9IWe0Cifr+XByjwr50PNibn0TERFPaphPs7q2v5FRPXpyQlhOPkF3OufDOXJ6VPTqF2q8FucS5RhxbWWCSIcfL+sx847ynfnLxFMlEP/fBec78Z7qGddN1hnWbsabaz2VHkmUWwgAGD20bpBsAyQ6h0FinFDfXhLBavhtdQgA7wCX0j0gnWX94NRypsuF7s1zfB9kF83EsWYW7bN/HnQni7Habso0p+ErUN2gCuZY1Mv9FoYM8YFZSUogTXI4JM7J9Y2LTSywsn92gzz+g5MAXiOznXKHVMm1EN00Sz3xYI7DPZAvt5fLc7nyQK5yaZfWvi+I95QSJ654rKF3QTlMWPEMBLBlfJAuRc2enkJPPSv0yDX/9yKqm1hFQ3oacQNJW4g3zHBpM1YbfFL5pbRylR8yqENJYCnXSS9ldmel1bSFqolyg8sOwMIuI4MKgLh0J5dIPN+zVH1zj0YpTRQCtLIAcEm+lmdRKrbu2z3spi2xRLNZrnK8wXJeSnaNU8IKf7dAm5E9sGjnqU2H7H+hrJ5/des1i3vgiHDOco0WY7OVPqUcJaLDEUnSQUd7UDU5vaRDNwp7O42L//3uamk/tOhTuAIKQxuTABFKNWAXBjYC5SrRy5RYHoefidqkbpI/sGpY3PovUgjz+1FaPZ5JUNSINDP6pGYDUUr0NEuJWhqIJijJjCoWjAoSo95WN3i7d9a1ACy3c5sDJIH4SnQ35SsZbzevhuOKiVFj4gRdIAGA6nS18KQCCSxpDFOBWE+qcITrFDBq1IwA98kemxNPv6nauq/32VQPgF0GzQQ4t229Z9MWXjVwtvchzn2MGjO94DqQGk6LJY1rljiZ4jVgy4hZxS1ulSUJtBWrgXLc9OxS38bc0bmxvn0dnu94jtZDtXmyFJSodmDSeBlBXVic3j0t4mnpoOn35XbPqXitEC3FzmmlPdFCrW4gIQnyT2q8z7e0zOrJ9OE/wyWLnYZLL96lvTvsfzHwlXc0L4cTJyZ6kpT5K+H0EM0v9DpTKqiqmbo7QdVDmuDMkpYkam+utEr7tOtJ2EQgxxdZX9BvtOwHYdIR9tAayher4nfbQvT92hfHhFFt4jKmyHLROWSCwGvwHStsRzBag9cBbyMV+W0x4oTWvec4c+3/bkQqMSR2t+RVWBdNKX+jxGxGr8HSBc6nVD7MTMoVEj+dWqQUso5SWREUaDYSjWR7azn5UlwsW+ESlgxXqYqNdvIEKFqcpXiY2G1HJHMk/iU41vXzZAZ2DU2EzqR2dfV+xfoO9i9gnrAWzbnBcF3xlcPwq98Sk4bKFjin7HxahSk/8+YLZdkC2FpukJS1AICkzfq7Ep8OE1ukpJy4Xhg12Fm4Ko9shAtgLJ/pdeiR0BxE77YOYuYawIqR2rSEzbL6CGOsGyij+GSVeGiNwZNyghQPvdp1tvtB2pat9YW+OapiM+exYKBwIDihieEwLkff4QfIksCWjvH2hHv
UDcLM0xLjVAEGyho4DzYvMIzzlDGJ/TgE/ewQmKupfOgn5cUHOPqYN9LJpNSVaT/00jWq2Y+LER3ZBWrgnY8Nh765uV/KROCQgXIbmeY9Yyokc9WLhr72PnBl1eEw8ZA5+wNiyEB5NcxEYUxCUraZ5+iCzkZVXt0I/bHGY0Q5zQEYJvPgSHHN1dzQXODZvw9iXcFX1JctsKLMpKVCeq09XNMUMUwYJROgKHysWjo0T+pOUi7iJH2ZB0XuamqhXwYyzpih0zgfOPKc+ZoXmuM3mdtYg963r2BmQlc+9kUZJ6waJXvxNrhmw71Imly/sEtnviKY/amMUqFLbk40rtI4Z/lLf611LDSzhT9DEsGyvqyDsIlPj6VVfbOUw1rapJzEVzRO6lr4HUtLoTfLyZpUl9C1X+ckNTir6U7GC8dt1y3USiuUdeg3UL+LSUxkw7yRlgKzG9i/Nqoj/bgYpuTFXM2LObmevZjzvDtWPysWB2GYLODJbR4IJx9PSZXqJ5U+gJ06nn1tAmAxn/4J9e3Wn/C/WUQAxkkvmMDu4TDakRKvw6xQ/aK4UYkCANrWdIkrCe+o9Rspo/jiVMt1ICcMgXgZMavPg/OLs+dR3VHUs6H3sbpf5/hug9VdOEXVFg1dRNOZbME8y/tQQOqnJJAukzy5TF4SjmOrVznTubP5YRLMgWJF+Rr6M7n7ynk5nNiMDXONsfKJF5yjjBvHmCISQktZex9rXfPNCDtBkjtXbVApTd2YOcL0YDvBXZJC1d4sDdx3qrqXQHWHiGN2MeYk8R0Dor7tmmnrH20eXqyBdNM+XnOboy+O788O56i4++KFc4aaFQ9nTyaaSMm3geoIldcbYCRmjvtTXDZ+RkElSRlpnZNAtFugRClval9+d2Wf1Eorm5uxfScT9e+F6yoniGL1VevvgRe3bvCztIHkl8hYOrQ6KyXqnNY3fOC8Y1IrmSeuBFTj/4DqMRNJMt8RSr1t7zRysB+kl5sXJeLpFWCgxHUc1Zc8MHzOHGnT9Cm9XN+Zvb+LznAAvUPGzU6cz/sDkDLsi3LFUqKj+eJ6al/LgYocPsvrUKqPaGBuMWu4JQD9Q0YNOvFWmezNcof4ge7BJkqUM5VMSslu5TSzzt9MYLvfoMF+8IA/345nrMaJOnMl8PtyTslmLT9Dij2wSgHGDPqBLzCeaB9XgdsICRElSo1V4mYP1R+OEdBpzGAvHQdAMrnoRKxy1HMDJY00wcoE5lHiyo5AzBgzQC/A8mHqE91++5TjSePMHohbEfuWsHTRa9BgGaBZSPgQ+TU7au3OKl/cRnjLTUv60s74LGJmbyV6DRpgEgDFhnKf0tlqxxZbdmeV5Uqg9M+EK62LpJ2YMWqAVQAf9d9cf1Yls7SyeaQmuSc1t08pX4kX6HnLF/Zx2WPNsAH6FwDUDUDSYAVSPWJKzhM3NxPRmixVDrRPkwRu4DqBOEqHIGwplzgDjCT2GjcA+lcBAM+OtYoTceOokqjab2r2dHvRYzz750Gp7tPuRYVFYQTJ3bnsWGAFjqQb5otdoM36jH5DB8DOmVXUMyriRT5KkutbvEbKpzdBauWwiWUpFc/+fVCqGq3JxoHKuHK0++mWcodLnI3oNXgAYGT7UKj4ma/LYRFaXCvfMLGb6hrxmcl5s4/Gqp5YjPVIrjKWtrT9EJXkV2YNcrJOi3ZOXtxkyFyEBolKlmVP+FPOUX282POIaZcWjw4MBlrKJitt9RAO6DI4GsHmdw2tXdz74BVAfdtxHeCIUzlQxVBec2opS4liYSOn4eQ2gVO2/zLWDI7GtNHtmZNGAACfhkqDFfR4WCPdCVfD6QxVR5To2DFIMEZGdoZhWc/dsOuXbAMAS5TwZFJqifElsbywfbEYC3QaIXuBEx78CfXtlKd/pTsaDDv13gUXlgFQbLlSVuJ9Eka5d8omVRQOGSAjIOicrzeVavrk+nbuq/mbVM2UlY1wxpS9awcBInqpNiYqXPQGcx9rBkg
XmGfX/P+LpCdEg3k8RYwR0hiB/jbTD45TPJxqqcYTTLwRTt6LW3aueNY5bsBE2qJash9CIx3QhIsAzmUz44VmLkZV0WWAjIJKhWpx0QlCFZfH26SnaGsUJsAlw+HUwjwtRTldHJIhQiVHNVdzQm5QS5WTjUYzVsbKdGg6Wh8/iWaAjIARCbNJ1lKBM/5j0ThNquvgem4nQzNWYKZejSH6HSwKV/9pNsQlCF1cWjXKauxvFCbBubN5IDT7Ch+Nim6IXECDUZXH5lQo3LL9cLHWSHDuh6OjlTREohohdQBQ/TYz6TBB16RSoFH4NCC9L2bCvNhz4QzRCFkA17UTn5QV8sH3aoQtlhsnQSmoHg2Jl60y+TBCVnETq6FfSJBMfUz0NgIjuEgOu7AkIa2cYj5h1BD9bkavHA9Cv2GJMJZY0J81POJq1g/9cPwm5ywepiHWO1eZhtJYmG5z491MEbCrEfyu35jIhv5kztiee4AtI2QNnLt2zlUhX0rOpLqLoXmdYRFwe45M+aEdGK/GmeMoYt4IGQJrTeuU5YduPLOWuh8ndH7zNoJQJyZaOyT0i/EVIhcwSN3pfM/BSLWxYRX6bXGsFa57QNuQbuxvA0v9FsXGQ98tlSl+AfQaI6ug2l3MSZg/Il6fyRRfNjCxOq/HSmb2A4wjazxG4UlahX5nkWueSw17jZEJ3HxK/+GpMDFXaXErVZ95xWUIAIfWuEAD2NV1cWQAgM9J+VZqbRcvTOadS5E+Y8goX2yB2FtMhnkSK5CspDZYid/zrh6nUnVxJ83M7O/q3HVw5ODE+nLnQu/FHdpGPWPEKG6kv0tmx6BYoae5vrdIssayUQLUBTK11bJYWsagYiZOoWSQzlM5vkguMrTaSc+ZtJbqIOf3pVZNyXniKQn9fIFF1kd0wTBZALOqQBEqCcI2HrpWcq62shm9Kq4+Jofrw/XkOkAEEk6c9VR2b5cw4NJltC+CYnA7L5tZzdrG8SRiSWcpT5ik10qZrdlmYTqT9vxwe63MFn4doYx14wRbYF2wd4urNJy+THlzx48tHu5O/aPH2B2tOdnzXh6zfHlPKtcRb42ljjkSZaJU8/IDY2mx0mGPP+SUTapwbMFAAUAk2K9S0xncxBLfcWezSRI3tzHri8TrmTjpjuTxlexg2nc0nQDIl92b9xIjMFS2wVA5lamX1YA4G98XNwg8RwWuctOBrywvcMTXuo2evKzUO7hO2GusoAsUCaKueHnq2ERjmy9yeSap4YljGwYLJkESIxVuZpZEBY2LcsQ/YDNVFEm0wXDBKuomAten1GK7L77VuEdeOxrY94xEwyQMGKwDv/spJ8pjoizVWClLlg7EWqkuAJMwZLB3BCwDSq4ZHVhS4qtGILDSzqXVbKaJWQt0waAB2gBOyMMrRjPDgSs6JzzlOn35mn1JxFmCjDbAuME2sAM61UFOt9nt/skpX4nSb4pQhFY5G7V/3F+mqAC9MHSANaAu1FtBypfHWBISL5t2PFGq4d0O0kFffrpmrhC9zCUqVWByBEYPJmYAUFl/SaJx0V7pUZVki+cHjrhK47KWOzZ+/LRE7e7zsH5TeZIYwMwEYPwA6AVALYFky5xgZb7SwmaxVJPZ8qXlhluW9qm06/mBChzX7diX3Rg4PtkXL1Q3E31EO+n1vyBkAaztAiKYd3ty6PNuWD3nPiWSAfp7AYAiTrWB6+i95NRwOB0l+8TUSixaqNUW4/VssVaIxlbME+3oBjN+e28DEX4xO0QCMLEGILI5peCFzOp6TuEwIxsAy70AgE/8k0gWf51q//X/z6Q0TgWni2qCNUOZvyrO8u5c7ryvkmuBa440RdXL1HBlcL6wblHt43LmMmKYAID+Y+dvFAon33odjxx97p7Qwk7hi9z6iAfArq73sR6iWfEOUNF+NkWRS5OjS2VwO2/nX6XSedVU0qdQkVdDZNRfsR86Wp1hsmZCn3dF9TxVHR84dM5UYSkU/qkGIs2TJLAc+QA
AJpZnDq1qkx53CKy9j7Yh6G1XegNCnz1otvCYX5J+cMic3sNLod4INqqkI0BbhKPFL504OD+6vk6A0YOjaCQ7BJCGWkmCkC3TrczQDYV+9kvCNOupuHAuEXRgqFFpgiefYwNAE4G4WgsZpK3GSs/rfehGJ6he2ZWQ/zdRUnx4hD/QdtAw2bsFUFyJFlOCkJnH8BmfQ88burEGrrDEuKtCn2Q6H01w8jj2wNqIMdIGvF1OSnep7Id05j0Sd5PrLnr1YhcIlbu96DsiIRPT2ZUa6Q1HCVjVqdQKf94FgtGkmARYUvdpLDE9HlrWUX5L9V3pOAyDgyD+mDkJwmgc1kBQ5HCY428VPIFtPbyWHi9MVannCl5SE2KoH6f9+QEfgBm1ZSAd5pQGgb94tx8Ldun41ZE27FkKJwcEStxMrfj9nose55knMet5LWHO23brm9RKSJvundla9sN10rFUJvG8Ma/n9uOPmQNiaTjrcLLUTUnjMm/oGH9Gcbk/ruaEj4lrKvQCLG3W9z3V3JgoXyQ0LoWX4HhiQUeWwRiV44PiaOjkN92SsDiPODINW0c//mG7pEOz1MoZ4lCbBvsBKsbNu69nfh1aapHwjRqWjMfuFQ2od2QP65B9vB9eDLGU+O1ic02Q9tU1ENtdmtkClV/3dBgGo6w2FrFJnHC3DWD5E932Yt7RMPoxpZzyCslF/wBH0rfNyxOxJGzi2INEFVJcdZwdDRhU0RqYN/ZAmE89XCWV7BBoGueaZWwvOhHNu5qGcOrLq7S5vuoe3LQTF7CDRLa9W+t4HgYHYjHOfypDra74CD7sN+qZUqFpT1D+NYmyllA4doBe06JcTb2zDyQunMTCTuhsnaizZFb62T6HWsYl0HTuedmYMytct7jEgBlNh8WlOp9qD/dl/clNqgdYjWw6J7ADAnGYxC46A5aWqb4S9VATpMGnhMlGcN0BzvNE63kdSfuaDv9REZtqiBQFDG2H42Hij32zxdlwW5mxSXtxlZHMwRmAtetzFk99QhklKtDyH5Ls3CZXE0mtcbo3dAKEHzFmS97RNPwCR5zjD5tM+YMD9k6GiYi7ZR8jlpVwHotbIzaAhEUwCwDHkCq+Gsv3aeqWq1yZrcSOfOVfi4VG8mK6QJGmeuIkstpGnuEqGRuoRonvhDmAeQ5c3js6v3d1DaBSI9WttIftEuTXzk1IYSKSU1ziXLfLZsu4aBqWmS9+slB8XqRtwGRj1mBeFyu5GksOihWIhjJYsrOmUCVSub8nHNeeFif2vlfbw/9Oh1eoshGx1tUFEBGPz5aKtsGKBqLyXpFq6rWwgRgaacxOnwvAl+LUp1rzjniulh91QPDErBLDpK9Ue0jr3/nl3tXr8oOu8c2aBQpf0la4javYPx2rkTsELM5aSqnZWvHE07KUI/sqKzXSS4p844+RYWgCD5K058/YMuBq+m4t5bdnW1PF2of4cZz5CzzdtWgzmp0ST4Xf+R2SGZYjdSmW2os9ZjDTlVlxPE1t0t6lRRP3lsseOCkyhnTBJUl1L2n2HdW0DuWJ05fJe4u1WrzjAhmBhixaORvdXAUROtIOXgC4qKQzdynVMadE2yhVnUrc/hJMENC7M2JGOXQurn+lanJJ3EBrm5DleJbnSVo05E4V88FiNTJHHUR4crXbtZwnlqaVSMugeXmcqyxWJI1+bH4G4Nr6+M3DSd2DZSzJlog2bsmR6WRTLEnpK6X2DgksTUtNDcRiVH84epE2GKChLpzfesHz4xqy40lfLB0TSknZpArBVgSmnjPnsSRaNrQOgMtPFoqfhbI9EsfI0Ab8PebGVzeXhtMSKN2Q9gRVQeStpBM04cSeG1CUnCNKW1mb8Yqv9LXUCUboOJhmAEKX+RGtZqbEc/SaNrZ4gR8a25HlZK8Br4ZTkyokPVFK09SOuZUa6yWVfOSOC2VkBnjtlBIzOrAkntIBR9LDJhOONkQSWwCje+tiRwYGxQq0LZWJJkh5JLvIHhPM9jp
ACaXqsxcHPPFVw5fYV95k3UBXhMlq75u4+pTNsWGRQFMNy8vEbVIvKwDY2hnxVXwu1m/iLKSk4pKEnt8QAkuGx2yqh4ikGsdEJ/B+jzMCS0tbJaDDfPakK/8bWEbE2ypA0WTetxRr5T6lPQdWvqQv7fCRNx1R9zWAtIKUn4iWLW0rUTKWjMX+419pKqNimZ8BWIZcL66rfbjS4gWiJRFI4OV8mzTKcoyk+yTAUaJ6W1ipjGlK+UrcciHF1L4aYOtgUxrizrcjLjP95opUsp4T2htVSizVMhA341Svn3ME5f7bI6BBIo6fXFQVpamsdcSvBMV/zuyjE47+/U1qYCMzAMNigv7fkl2NZw9k+3zPs9JK+V4Hif+kjsarNmeJ851lhci5T/QCFBemuVjLaQvJUpYMb5zxObTIev64m+AQsEbXAPx9ymmm9EBis9pai4uIxBcLexJn/xzqflmcMsopjg7QuQsRwiGAZUKVXC0lj11K63CeE5/qdwAX1ySHurQKAK+dKozzlMkjBu72LxwYTI+VPyWH/f2e95P7+IwmgOWhCMrkOZKU+x2JjaymOTxL/GTcZnUzfsDaQTRR218/nydRSDKvnWNyKRTrG/MBSKrv+zoBABbmI0ZYWgeYaM6ILe2OxhF1ycaSeVWkyyQG0IWmbPMkGE7VW4igS6eZaAMe3MWarRcYaFrKVTKYPP29KEb8ZnQMYhBUO+f3b/d3du7c24Dq0nLjeBpPietrtFoZ06YoO+LkCimu2s8JrB6EAdryBDDapXfBBnDRiPPPKjU1p03tcsVNquKV/BQeONa3jTA1MglcPckIWNul84CWOb5XZi8OWOJqdDPHE/EPziUHzBhiag2gGLL++MOUXxwtPb3iSeB3vIlEtDyldTjPfjKa4Mp+ZgZ5USK7tgBGnvJowxQxPyDnzBPGKIHppShT5N7xYmJ8WNK+ppSTX7Qf3hsC2kaN8JJT1oGL5DrHNVWUdt8pL9rfj3DUxUMYGgXtIHPgaRe9jKtJU02LbIyZxCLVxUcG9Bvg2J/WABBqN+8T2IWBenr8UvkY6foecukAbQ0fbjhpJ1FfLOQcCVxtwlL+SIn0k9AGtgzxWmvAReHaxvXlAxYLA7PiBfWKyvENk7P4IbmhYkeBoYa5mVwG9/BILLlPU8pKS7pSSHHVfAdgaxeM0Fj2l8N0llKL5Q5RVoh2uoFF+23+iQacLWIGIEQ5Ny7NPKpt+ICBJW5SiqwJyt4dRxj6+w0xRfi89Jod7Ut6TsjYHcurtxATz9IDtrSOletqiEE7nrO01rA4/GxCKE8YGYMc/9cuEGK1FMEtj7nKUeGLjelx86Ju3q/FwX4tY6vmWs0pMQ+3uxL4mtzMqXx0hZVFsgJ6YYx2EJQDpdKlLYmjNF1tTTluszoJD2BmItxSXKrvv7Wa6dNU1npK1MCizeni8gFWR2CcxrWZ1FysZZQWgd4JJNh9ur2om0imGIXch7poAahrKbeAM5tKT3uilLbYmX3ms3oqDB0w1MvzW8cOiF+UM5Oa0hazYvkyt/FLj3Kus5ioW4Gh4zZn/HWmrVjLaTsPT1rJUr5a4vykaAFbxntlfffn7kWreU1FqHIkGBCb6PlgwMze+YsDXhcrtbuUWdLWipoWZ3ch9TN/DLA1AcO1SYC14cunCjlXPF9LKZrORP8pkf59AYCR4nroJDayWuOmnMwx388roOzZsV4MXf0wYuvvR93vP6Da+7SLQUtaY/xm31xilnX7aV7xk6KM2ZIPxAk0OVpHswmhelwvztmobQH4bVwvC935QbECbWFLcfMp39aVwxVLrEDT1I726JmeRb/xrgJbMHLbArgciMJzameulnU4lrjJeK1ln6S1yWoqv2hTuqh8gN6DRn+ViWsAJ4xqSPOVSUhgeb7GK+2R7D6TaoHrwjPHSFfzcOFlhMQXdhPDw/odYkIF0jdQ3UP6/sWBBTQPdpXA63psK9G8J4Gjy/Fk6sX2f72vUk9TbS5
sdAZgfEg9drxdNTxsSXniVcRmxojdFzABdHWiObGhr42XRFy9NHM4K07DblZassMJ+Qrfqjm6/MZegEvv9lVLlSkJ3IasZKx9T4Irk2QJHGqWLjuyDWCcL9yuVVytGbByxcos2qROwhvoHUXzZJ0TYEvmnKKaS/UNixZXQ1kiLVOpZ0U6R4MGtmAdzZdtAVwaVN9KRyp9EqjwUwcHqlHSN0ITaEOzZiMLwA19oanCecPI6soSi8RtTXEqiY3Awq7m76JrO8EzGOUgwbYlP5YORNWvgFiDF7hoXyop7nyOP4D9+9Ec2gLworle/Ek7ah2f2+gY7DtaHnCi9h++T3+iMd1MQ5Yk3sj9+xaFs30Opap7oqVq0a6x2jlXSJrADJpR29kPsKiUdcQp5gtX0U377EQTJZ/5yBy9/XUiAb0jaF5tfQsAcJ1YpNsIAFjdi2bY+n/JKACg61Anrs1dAwA=", - "type": "sample", - "tags": [ - "NodeJS", - "Express" - ], - "projectType": "nodejs", - "language": "nodejs", - "git": { - "remotes": { - "origin": "https://github.com/redhat-developer/devfile-sample.git" - } - } - }, - { - "name": "code-with-quarkus", - "displayName": "Basic Quarkus", - "description": "A simple Hello World Java application using Quarkus", - "icon": "iVBORw0KGgoAAAANSUhEUgAAAGAAAABdCAYAAABafGNLAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAABgoAMABAAAAAEAAABdAAAAAHJ9pkIAAALiaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+OTY8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxZ
RGltZW5zaW9uPjkzPC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CuvJJYoAAAu0SURBVHgB7V1dbBxXFb6z9tqOMV0nWSdU0HoDDwjU0LXUByqQvJbASSokbwVqkQrKuokEaQLd8KP2AeT1W9ogZUOTIPUHr6MCbZWSjQSt4wLeIKQ+UClOmweE1GZTqUKuN6kXJ3Hinx2+MzvXbMYzu3d2fnbG2Ssd3517zz333vPNnHPn/owZa4aGakAyW/vO52ZjpQCLSUzqlpkcNVt+PfCj79Po+1ygxHITP+rJWemTEACK0iU5wSRpt5XK1mnZIpPlbKBlJTWx7+682T5WBWDnb/4TKZVa0xA6ZFbwHckvy+OLS8Fk7uDGOdH+GwIweKIQB7IZCAqJCmvyMSYzdhl/42/t3zItoo+AHtM3j3+cgPJPI6+pfD0FVUnDHd0LH5FTdFiFj2eteQKoIASMcYZmXL8G4Kj7aj0JtwEA5UcJPVTZvPPr13tlyeLiYmukmk+4zQRB+eRwm8qvVKG136G2tuVMNRGrAKg2q78aczOvLg0M0TDeqOQqAJIkJY2YmunWNFCidyiDoPgA1fafN+DRT5bYBYy5svqZ6zxVYhiis/vN9BK+YKOeL2glIQFZism3ueOqoot4BY9bfQWvWoP3M1PqaFHYZwbbluLoVkbbNcUEQfkxbYbRNYZWsTtc+YpqMLzMYGomYaQnbXqABaLaNLrmPqBbL3NNGl61a41r15RZxwmTT4Sz6N45kS4aTVxyAMRGP4EAVdgMFRqQLPpBDkCFSOOfgRV5zjj3zswBANNWem4KACsVGZWV2UOYW/9WzCjfrnSaXBw89nHGLnl2yVFGQXYJq1MOhnOlKQBxBnFSYhP5OuXoFlOH2GlMLvbDaQrZa11BDiV6AQDetSGMCYYAxChjbWmJZS2Zu9iRT7rbgktpxry9iNRwE8S1XxGPMLaYBxCJijRTP3ccm01iDibvhxU8Lz0BlUoO4WKsDEIgJbE/5SozjX7TnMtKgGVkzMkb8Xgt3asAcD31l/3DrnGsMgGIiTzPqIzVpdNMibF+qTLDB7+9DoCqQrLjUhxPBGz6//0D2fn2tpVUqSQ/6QNd6zbRiz5At6FIJLM0Av+AYeuuOM3FkJ3HG6ZvlU8d9RMA1F7GYtt75Q/HX3z+u5sOfSHcSqD4OvjEBEHHka1wyz8psNh9Ydj5zREknXikm719aWnmV3/779Zrt2RfAuH9J6D7U4wdGi6yfz9/g5Sv1fKD24JbXx3evLDnwa6iNs8P194GYO+OBfbBb4vsqe+EWLC100ihrQG24ZG+jtDrezYXv/LZoBGbJ9O9aYJi2xl7bt8Mu68XdodtENVcV7sUOjwUYh8UlgupN+fDM/MrokUbxuetJ4DMTfaXM2zqEFOVX5diPh9uDZ/8/kZ2cKCrAFDqkuFWIe8AQHZ+5vcLbOirdNfbEnZ+qSP8SmLTzV1f7liwRaADQhpvgr79tRvshR8vsY1djgwpgy1SRzLWxR57oLNw4u/XuycdUKIVkY1/AtI/eAHKt9IHobI9XYHgvq93HhZidpGpYQDQxNk7Hy5l2ec2Z9HfCGjcwX6PUh1b7mr5SD58KuvGApBoX1wHgCbOBo/P5nDKZuruu6QoGjoFwhwPS4L6QOdAdgWStQ2UUWmKtbehTloA2oVZ050RpDc0uAYATZztOF5I48DHJfS4X9Pr3bjOg+LYoRdDPAy6DKo3UNkBUByUAFGdQ6CKQBN8AcwrPZSSWby7IsPVn64AwBdIakyckRMekWU5j3gOhDuVjYLMvOES70GAGEFMlAeNgIyCUmd5gq/+BSAj4SLpjgJAdn7wxOw0Nn4dQWOosyKhF0ynQVmVCIhxUK1wFAwR0DRAnEY8BjJTJy0A5UBUn2vBEQBUO58lO89M7qGs6Hk/fp8H
JVUaQHwBpA3nkNAHSqs0hfh+UD1BqbPsH9wxS7YCQHYeDhYLJIqdH6pHAzplnkRaHhSFaaG7cxhEpuYy6GFQXKVpxLtBNgTyD8q6dMoGYVVF2AYALZAE25ZJCSNVa6wvM4RiR1T/kMfviGrnSRqvk3jsDCRvBCYp7+Sw1RYAyNbT6RrJ+cXwXihlChQFGEnEp0GU5mSA/BLeHZwZstoCAO2WprNQ2Pw07qQmNLK7NddOXY5iHTpitCHAaqW2AECNoMMHkwe2JDDUJId4zmrDPFD+DO78bRJ7I2V1k1i1vtgGAK+Etq9P7u+JYVPUw7K1lyku0u34Al7QBqD4uFN3fWWHbAeAC6e982/t74ngGo+wqZcpLsLtuIitLweh+KjoRjA7GugYALxxeBpSgcBy1GX/wKsXjY+W7fyf06IF7OJzHABqKH1FhPwDzpYN4NJL/gFtUex80kk7Xw0sVwDgDaDREvkHOOrhBvuHy6qdx/B5Is/b14jYVQB4B+mA29Jia7T75ckrPM2NeLnEFt5r3XQRdh7DSrENv063qyEAUKdo2Np54Ng82/Y4Li4WnO4obeB6dOzKhp/JX3zH6brMyG/8mnB+BjP3T4Wx5ZCxkz8tsHt61my+MtMhLW/+6srMs3+Z3/p+Ydm2xX5tHVauGw8Ab33uPcbuTYQZbcY6tk9i7cEOnlVPjK2KxSO568F/vH/Tk4rnfWqYCeINWBO/eHYD+8xjHeyls3WZJdj5G6+dv1nc/fLVEJRvuJtuTb0NSvAeAKSIueuM7f11WPEPFy4JA0F2/vHffdL50tvXQn7ZrOsdE6R3B5J/iB4o+4c//qJotHdo9lqp8Oxf58PvfrTkaXOj10VvPgHalpJ/2PRoiD1zqsgWlxd49tKKfDOdu7bwvZNXSfk82Vext58ArSqfHguxw6cussEHrj6z54f/+mf+1uz8LfkeLZufrv3xBJQ1iskyNixdeXW79Iefv/v0Nz79yut7w/cibRREeb4MfgFAnSx7I6PVMk32NWAxSNuMuq+9DoDQZBlfDPLgZF9NYLwKQF2TZR6a7KupeM7gNQBgy5VFEUuTZXyyD530vH/wEAC0oE+L37UXRWiro/Jta34b6cSKWeKLQYyd0WHxRJIXAICdZ30SezMhuihSkrBbTjb+FGSlZpXFoP09ccU/0JcePRYa/B5QgtLNLYiUv1ao7AXqpS2Qot/sVz80GKUyXsKgoU+AWeWXFSeluAKxBTLBf4vGooCJyrPK11AAzDa+4u7nRWl3nK+DrwDAflCtwkMEip8R8A0AtP9Ub6s79qQ2AXDjDsRZg5RBPf1ec6wG7dRN9sUToNz9a8+VrXaotNKSWr3w2Q9fAFDl7i+rW5LidDjEZ7pXmut5AFTz0l9DuSH16+Q12LyX7XkARM2LzgjJe9rWaZGnAVDuftH/3ofDgKqv0Ommc0krATliRXoZAME5EtjimJXKzJYVvfu53Gr/KoTz2B3j490xKzLLAMjKwWgROUm3nJ1Sj+jdz1sOfrfaR1WqT2icV181Lh9AX8OiAICXmek1OfoJobb25ZwbncQnKbVvvfot0qS2B5cTmiRHLkkHJbk1C+FCpzONdKwAgJPsOeFWwtbScVSaAgBFhcuZYFQBrgsAZaraRF1mWanP/NMLem/mRvJgHnN6eRJPxAHrOfwWQpOX8WyM82nqvxcxbCKZD/VAuSGPbRnwsZNP9OjerGUfUK4pbVuFjRYksFijTku7slKGM82Gul0FAFs7iAlrsusiDKkvcNU7I0mZ6gzWc2WcFKU1aiNJqwDQGiqOliaMGP2WLrJYQ2aKFORk31pKyveKDKtYBYA4FLvp7ml3w4bZkJEUkQEnmBHhq5NnVF0KNSx+GwDERacZPX6k1LAzmgyhxRrV9GqK2nCJG5l27dWStAYAKrBeQBBZrFFMr81PPeo9quiwlvaRrwsABwGv2Qfx28+OWWixBv9LM0N9tiEU6RMNZ/eHhcwf1WcIAGWe
PdCTrtj46ksgROaTyE5bdMakm1HSVa33D9JrZai5L0h5RPHlQXo7pTl3ZfIpgC9X1f8pssr6nf+NxRqxSuQUtkWOifGCiyYwS/g2Hf69o1mlC9fRZGxqYN1r4H/Cy7I+ycHqyQAAAABJRU5ErkJggg==", - "type": "sample", - "tags": [ - "Java", - "Quarkus" - ], - "projectType": "quarkus", - "language": "java", - "git": { - "remotes": { - "origin": "https://github.com/elsony/devfile-sample-code-with-quarkus.git" - } - } - }, - { - "name": "java-springboot-basic", - "displayName": "Basic Spring Boot", - "description": "A simple Hello World Java Spring Boot application using Maven", - "icon": "iVBORw0KGgoAAAANSUhEUgAAAGkAAABhCAYAAADV0Y9XAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAABpoAMABAAAAAEAAABhAAAAAHcadxgAAALjaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+MTA1PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6Q29sb3JTcGFjZT4xPC9leGlmOkNvbG9yU3BhY2U+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj45NzwvZXhpZjpQaXhlbFlEaW1lbnNpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgpGdeVGAAAWNklEQVR4Ae09CXgURdZdPWeSyUFOArlDIOEIIEcg5JiAikICoiK6rLi6q7KYEPlxPX50yXqsrLIeJMiCn7uuru7+6qqQqCsq5OIMNwEChJCLnARyTI45uvt/1cnEyWSmp3uOZBK2vm9mqqtevfeqXlfVq/eqahDhxGHtl/
H+Uol4NkOgCQgRYTRDhCGCCAOWveHj0ce6F/xCMtENn5sQayUY+BBEEyKYMhqR50gCXZBKJGVbF+/thPQRF3DlnCas262cIEL0IgKheGBqPnyi7MgcwxDEaajwPhDeT7pusvC9B/JVdsTvMFTDLqT1uUlRDEIr4e2/H2o502E1HYxYC0nFBGL+QZDqL7KXHGkfDOIcKcMipIxv75YRdOdqgkFPQTPc5gRN0YUY4kuKIT7avqzgRxgyodM5TxhSIWV8m+hH0OgxhkEZQHi88zTDAE5KCYJ5s6E74JPPH/icGpAzTA9DIqSMb+M8ECXfBK/neqinfJjqKpRsGfSuP3q7o39mpeTrhBbWw2/4bL7L2w8cwkqN1YG0uiSPgllZBJmRm7SGoOQXQUDPQpGRIiBcu2gGER+1qJjjT+UmYyXGqkC5SF9Y93VSsFWF+wo5rCdlfpMSR9P0LqATawuDTlKWhmlqlxbR/7sjtfgmX56eyJ3lKiMUVbBC+EN2Wn4O33LGcHbvSSs/WylKz0t+CQRUDMRGg4Bwm5GwVlsrZkQX1u9JWW7ciOae5YRiLeT5why3whwMn3S79qSnvlkYiijqI1h4JvEhPkJhsOaXrQ5UPbNr9nGsxpsM6z5TKkQuzBXI9IcPJSXQ2D+n5V83CWwh0W496ak9ylSSps6McgHh5sQv9npZvWLfhtyFZjVUkZx5AeD8ZSKXLvgVaRgiFX6tCnYRUnquci2JmK+BA72pxipmRlihBB1BnTClVEB7RIMonxkj969fO/eNErZeyPohzzYhwUANFoPXYVLdAYyIRlgj24Ndf2jAHzLylLf3I4M26WsP6W/mvnolwnvKFBKReL11xzPf3+nWDycgYrWQsIIAzH0MPD0vgN5oBHUjGCZvfW4yqxxkfKPMhEoqJ/nedibYY2I8QqRvtG/cOUhzUWvUC61pAOuEBJIJdGl6H7SW1dYQHYVlZKBNfLY+V5kFAnsdz0NPzN2Cew3bvimRK1m1HXrYEmvqbpWQoAe9CUw9ag3BUVxGzBDMZqiffEPC9hMSUhqpr2uUz3Q2Du/2UrAKCtaoBQuJfVsIZqOegf/+DmyBlVMzC8a5RyQYppJIFBTqNfkSpAWnf5s01TCPT1yQkDL2KH/V97bwwX3LwUz2m38mMWwF9oUNCsrwe+vYRJpcOijTQgJvIaV/kzQNfC/bLeC7ZbNDx0y++OTc14KgASSmGiHaLw57k7EXRPC8xEtIz+5e4I5o9AXQcDXFwK2eFu495cLGBdv9QJNjBWGqPdyk7jEyiUIFefOxV8AUjLk0XkLqQeL3AMFEc0hu5fRw76llG+JzAqCPmBVQX/tIZgQkXIC4mNa5CDKbWRQSuBpWg8n+l7eyIMzVfea4lBNPx2cH8hAQiyIueAk2EYFeDvs4BAQxFyzbLSn0JhfMrZqXOunxojujVmM/E2cbGrZP6JjoXr8SQgsN0y3FuXsSJd8CCOBN+W/Qt4BEJOt+PumDAyCgREjjLSBcHtZOET6ugVjLm5a5exEMkfyCWSFl5CbOBRRP8kNza0BF+84u3bI4t26cR+QCa2s8N+jOCiiLGKRN5ovDrJDATPgWIDGfz5fCKICTiuWqdfO2/gifGENLgjVVm+q/AIw1YFBDSMm3vMnuuj4vOQV2Elr9tvAlPhLg5gUvOfpg7MbxYDW43R78BihC2GEOJIXnM17BpJBAQC/yKj2KgaL95p59ZOYmyk3qiYd9uwXolZEysbxTreuZhr23fHbRDhJSnxNrod24GmGIJvrOKl0z80W1h2zMLAexLorwnn7lQtORWNKFmA008i3RGSQkmISetVRotOVLxFJVYug9RxZPeMTXReI23dH1mxYw/yYICZOZB598HOEKA4SETzEAsGADIBcBZ80TkxJ17NjEUmXEys5Qr+iZiECCFpi21CvK5zbWvocQjYVkMQwQkkQq/QU4rUwaCC1icnIAESnWgqf06tSABY2zgxZJvV3GYpeBo4
Y0ztbwdQ3EhlhoaoSHO4thgJCg1BqLJZwUwFWiaHOVenYoJJ5d3q5jOwLdw7sCFMGUvyJE7OMS6CsTu0QA69j+iD/DGkSkJNhV6tHapWkf/9u8hDGWNlz2C4l1RdBDevSEs6FkYldVkEdUTaB7WBs0uBoam1BIPUUuEoVUJnKVS8VSuQiJ3cDyrIChyhOQ6T+ceJ0kE4V4RteUNR/1kjLiKcBTMRdf/UIiaZTKrrK4oB2YN8YloD45/L7LMX6zRb5uQYGwaAwDcjEOJDmsqMPHxLSBkAiaofkLCTyuMHEKdr/bVFEPuff1pVGPnZs5PsVHLnbDzN4ydsKQMTE6tvEQmmypEdme9Kv9SjmhYobMwhCoCK9cPePZmhCvmDnAIG8blqXKDHc+wzDNCLFDr9QSL2MVIX0nTBj8cnIGVkiKDjoebHUOP5YCE3jdurit1X6K8XgVH8bJ2QjMbFe3VHrKffGLZzEoJF593lmEFRrOwAoJdlgmwXDHCWhLJt7Bee+U9OLE0BWz4U3jtTawhd4wlaXqOiq6QEi8yEtEcp8+wHHsNi/Y/GCuIBgYQF8nLHc5cwgspePes+XO3LKksHuTQUBuluBHan6jqvrIWEUo79EIXlxfMNrieUm29qt4P656s0ICAIdoURN8Zpz//aJPRXIJqxRw8THS85j/O/uWF2ioQvbUiaDXNeOKS0Ti8VwNQD5xbBa2MERxAVmTFx+SdmT9/HfCYA3D2wNpDR1nKNOkqj3s7RLQCbwIGincJJ64DHjtEGuBYOMmvkjXBrcJkG5RGzFR1mzS9MDE4+CDwXcy3ApbwHr+UvJc8J0THsYnJwQFhdSjhy2AGO7hjiJIzq4miCoAh8F22sdmvYxNL3YVvFA+hgq+tKH4cLdG5eavCBJ8UYib1EON+WRobs0azoIyY+xVIYXM68bTC3I8YIhztxdOZ8ajpTWVfzv5StzS6EfPAJ8yoby6St21uAyJuE/lk6A52E1IT8/fdhG0lrFCmR2h8PQHJZvbKVongfkXTxnCA9Ort8G+Rk6tkISbr/AtVzaHxLDlh8EIyttvbzPBYUZQ1lxSdL75UKwy4v6joEpbN2Ugmq0FXOrBLSSwNAjal2yqbaQiac99UzLDTOWNxrQurap0Z8kL8XiRvnTSr7Gj1KqArw3DAaYcTqMpDIeMYK2kF/XP30sm/QbepltjmGMY+sabhY9742FucdTDh8Bab91QB81HExQrHMSQbT+35uAYHhRZDWNwFr8U7PFUht8XyQ96xENp3j+2qaalu34c3sm6OOqRCFtq1Nbdwg5zDGIsCYmxSUhxQXefsHpMtqWGw1D2u0t/O1raeIjdqPLozJeOwOgxzhY2OtTX2cUvDHb4pkuzgSQY0iYhLYxY2Tv7mSUxOjJO1efnf3fp7wm4NuM9Iiumjk0weaJPSG3b1TdZfQAxFnqSJQAuoviUNWh0sVwwoyHv4vUTBX89nqXEdUFIRKfPe6sDorYu1qkOTSu7/GEoEbfiAKp6JSZuTZjsH1cG5QTZq6yhM5xlKlsvFL53eGOSnodVU58ugl2tNu/No2jtNax8YLwaBpXr8Zv6JSkdU2kqg09ajH8cfqNGbbjccrLg7eJ1iXoVGTaPXI4PTZtnjwqrNG2NLB5QF3et2HeFCyfZrAmoAwANF5C5vEifWFu7vDnUw55+uqEoP/vQhmS9gGBLWGfmgm0iYEyw+cdUZRpU1V1sOiKwT4kzkH33jFZzQpnJ9JT72M2kZIbEcCT37CnbeeCDYy8pDYn/LmHnGXwIzDDNlnj5jVNY4LD1B7E+JS5crPscAE7BR/CiTIykNlsruJgb6jzoNY3bD/3P9UstJxcY0n5w2sYCUJCSDdNsjZ+8ti8c44ALUvC8zhlInAuqxSFOKDOZsE4YNT1Jo+u59PJPv6BAQFMMqxsXdFcJzEOs6m2Ybksc9trVNXXWsvY+aPtiS7hYIYFdyCohAXJ9T7REx6nz6zsqDmz6YUUQti
QYMjrRZ+a51TOex0JjhybDPFviDR1Xr+rLk6QG3xPIGdhGJkWuJwiqqwcgOa2xxphgeOiEMdUuVnRj3EPxDPy35ZbtKv2x/J8DhjdMG2ty6fPfCoSo3b3L+RVf9I5AiOh5Z+lBPNVwBrYnZS/5DqwODHtghhPaKBO0x14fvVH6SHi80d149KUf7teYEhDevLkxYQe8fBYv0BBcVXgxWo/V/TiJLcigc3wQsELqBURf8ilgCNOtVVnUTAzhnSEOjdSx9/InRVk/rZoLmxkH7S0I8phY/lzyB3LYfjYozx78V908d0FHa/s0O+rffHD2C0lMiHABQXa4+o6qNj5EnASGqm69WLT5x5XdeRffTzTF07SABad+l7TT35Ful9yy91mtDkYupl0h2mGKD+O0fiG9nbbvGtwuIEiBgBW5XSdUY+bs9dzWc71ka9HjV7cWP5nY2nPdpJMOe5Yfn/NaDMyxDltWqDStpy63nB7bWy906cOU/FY+dRygnYGn8HNQCQdNouYQHazOi7p74iO49/UL2xzscKRr6J6yT05u6TpZnz+Hi/7yyU8WLop4CNfboS/dJ6de79+gQyP0Fy6eDPMGCEki0X6s00r/CAC8NJq2nuYAcCWfgVN2TmUJ79S0nf7q/Huaktq98C9m5l3T2GEJF2gcivKe0W9ANWwce8Y7tR2nzzUd6TXMIqTzO8b/bxEG9IC37zp0Axj7uxDmiiu/dop5CY6ddFa3Xih692BG2Qt7l08/Wvv9HC4Bebn4N752+1dlQyEgaE8NWNJ/HkYZ4vusLMs2O70cBvQknEjS5Ls0Sa+FKIx8lsN/Ln8Ud3vkQ3UkKRqwELRc0i4QalClTx+o2qMtrPxqulrXZVIhMKaUELr8MNyVOhGOck4zznPE88n6/Qdr2i4p+3AzlFT6WyF0TAoC7rjLAxkt5YsIdswUwaYMXg3EF6c5OOgdbc2qa+eP1H5HH6jcPa1Lp/r5DTVXqC8dHybOmPfn0vEeUXY183CRxeam5/amhut9R9DgJdvSCvD5LN5hUE/CJWkCvQbj4BKImhSiMfZvL/41YV7w3SV8D1AZl7fwTENFL5e3nGoort7tdb7p6BSaoeZbKDMoe2HkqoPLo9dGwfpnyAQEi/2WN4qekOsFxG7hYjRPDmLOQoJZIWTkJX8KJtqHLJTvz8bnX19e9IXG1s0ZgJDRUOor1W0X647V/iA7XV84sVPbZrUhN9J7WtmvZ7/So5B6zehndmgiXTmHN1Zcun68/zgM/D9C4bbUAsHWdJM9CdeBZKjnaUJ0D0Rd+NSpveeG76v5D9duSv6wBt9TwKdMHwwDe6qvXmsrrz1x7SfJiYb9UYBrAuThj9UB7qSrePS239cHKEJxrxugIFmNlH9Bzb/Obj0LAorTF4HeQMulOt4vvb4c/jXbk3AmzE2vAMiLOM43YK3pmYSdteYuUII5pb1D3Vp+9WZp+9mGYrdzjYcn2NJTjPnCFzitit2o8nMdh8f9oRYOkGRuvl/yYtXZxgPGPffj7LSCNcb88nnmFBK+6ot0YfAf6EbwQWYIc8eE1QdAoYikGJ2qQVVZf77xCHG2oTCwpr0igmEouzaeq1jRnhK56rQy/H4/cHNHG/IxlHGK0Va9UfAEU6+6GjaALvyDNAjIB7oEXvgLDpxCwtgy9yTPgT+0OwBRdmeLYAoOKoB3kM4at7A0JWKlJtA9YiaQ4bUAdxA7NKzRDmQf3jgTlgEKYxq0jrx9+4r9Pxmn8322KCSMCP8/BQxTm/kidRScXKzomBdyV2lCyDIC3Nl49T6cgmGriU1Pu468MMijq28DUBa+AGVhpf7Zml9eQsrarxTDX3kWAgGzqq+HzKcZbpqv7dZ0SMtvnJkEarJZpUQIo7DZ5Rqc/ymLD0lVgIqPx3mZkPKOglXrui/ChhVVceXu28xbNpi67NTCYGuHOT3vvISEgZ/+ShlGiVnH4AArMj7mnj7/7YMTvGPxtltWMODDb/j09J
9qsGlGT0jIb6BH+NU7IldXTQtICICrMPEcw5tPIXSEwuITFVWtZef+c+lDr/PNRy1ZKxoJEZqdvSS/VigdY3hBle+9hprcD0j6h5l1cW8WRPvNMaX7a989mFl+5cbpGGOipp6DvSZeAfNSzZSA+PFSUmb30/CmaPJIo2A4u1x1s6zxYNUexcn6guk8R4gOHUOm7Vi2v4AHDYsggoSEsaXnJS2DS62/hKgI3zG3ZXEe3hdhcghq7rp26JV97C30uOigEOIZVX5H1Jpa2K4cbOtVzoOQC0uAA49Ms1rb2dTcWdda03aRBl+Z4kLT0SghZqc+kj1g7F2as6xwnzAWzEMLnjdyUgv3wJ8tZsJ+sZxA90i8+9VsT/GW+weZIg132FUmhN5TNdY9lGhW1SI4UtJQ316p6tGpBuyI1VI9km6dWgZvEuMu673zAJQHjVQko+B6TtpN4qHF+EHtpiUil3711lXqRovJge+NllIT3dpOUkurUZe2TaxSt0lhw7y8pavOu62nxRd6CL5vwtY7J3Rw1mhVTpr9BITrJ1hIuNC2tMLt6bnJbuCPeRg/mwsUQeMdSINCfUdl2Oel74QNyuBIaOrkyHSOLBouZH80J7Vgj73ZsXpRmZNW8EbNjbLtMEw0mmPqcvOJa+byRlm6DrYePJ6Tmv8PR9RL8JxkzERrV+N9ni7+n0L6gKEKjnZUbNq7wteKMd2YhLM/q8C6/UDOsoLvHMWozULCjDW1X0mQiN1fdZN5BpEEIitullZ/cHzzdLgI1stRjDsJ3jq4nzM1e3nhSUfyYxchYQbx/4DD30z/C6JD5q9xZMPwwH2cJKh7300rruYBaxOI1XOSMVW8JcxHgVJgS9QfIK9f0zKGGwXPDNRhm0/39fihEBBuL7v1JMPGf2qPMhX+sH4npA3HvgdDVuwdr4GD4A9n22mRypc5u/UkQ4Lbl+XnEaKeGNik+Q6k6wzzRmgc12GHSIdih1pAuL0c0pMMBZG5OzGWJskdkIZteyMx7CNpesO7y4vwTVzDEhwuJLZWYJ5Iz1Muh+sINsFrwev/GYalNQYSPQPWg5ewhWVg8tA/DY2QDOqVmatMgAXwczD7phokO08UMQdg0+ufslML8uCFwkrCsIchF5K+xuvzUmaB6X8NPD8InwHuDz3MEP42Yecco6O3Z99TdH4I6fIiNWxC0nOHHYrNHcRdCDGrgZk7IF1/X7YexFG/zXC9yZcMTX/W2BNQ0HcK31G0bMI77EIy5D4riyBb5yhjdRSTAm92CuRhTzC/29ANEZmO18LYVQx3xx1kKKa4QR1wxpkFY1gFpxKSIWP6+MZcpS/ov9HQwNE0oieBLysAjJkeMG94AowHpHtBJVQQx26LNhAuBba0BphPrsKUUgF3yV3RkkT5e0vzG/Q4R9rv/wN1zenro4tofQAAAABJRU5ErkJggg==", - "type": "sample", - "tags": [ - "Java", - "Spring" - ], - "projectType": "springboot", - "language": "java", - "git": { - "remotes": { - "origin": "https://github.com/elsony/devfile-sample-java-springboot-basic.git" - } - } - }, - { - "name": "python-basic", - "displayName": "Basic Python", - "description": "A simple Hello World application using Python", - "icon": 
"iVBORw0KGgoAAAANSUhEUgAAAEIAAABCCAYAAADjVADoAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAABCoAMABAAAAAEAAABCAAAAAGpSBCwAAALiaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+NjY8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjY2PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiQPAV8AABBVSURBVHgB7Vt9dBXFFZ/dffkGEmISIYSEYAhKjKlgRTBaUFoFi1IFxY/SD49Qi/X0HE/VeqSntp5WPVVr29NWa1EUPVLrQUSRYkQPICCoYCBBvgnhKyEkIeS95L23b7f3zuzdN293X/KQpad/MMlm5s7cuXN/v7mzO/sRxs6lcwzIDCiycFbKhVUDBg9Uy4vHXT8sp6BkhBkJDzMVs0AxzUGmyTJMxjTTNCLMNENwdBpMaVVVrbn58/ebwu1tR7sONew5K345jJ4lIoqzL5n9g2l5wy+cmTd8dI1h6OUMQQNyAA0HwMeygbJ1MKgHmdfbdawz0t25I9iy/7Wt773yCmvbecrhv2+i70TU3P5QbcnY655LyxowNhaNMD3SC85KoLGMRBgA3CpzMpAEkpEg+AFmoE5halomi4S6t7bWf/TTPSuf3+AbesmQJpXPuFhzy8OXVky5c1UsGi5HAnrDERbRYyyqQ8AD5SrSjiTgjHPQWBQylHiZk8JJQBnbDYaEKqoyJG/ExTPT84eub9+xoemMnXUY8JMI5bK7f/N3IAGWAoKPsZqywWz62OGsrCCHNbcFWW9E54RwsBZIRCtkkVvo7Touc5KAkHBv5sDiUVd3HNv9euTE0ZADyxmJvhFxXknNsPJJMx/Xw7054WiMXX3R+eyxmTWspjSfja8oZGWFA9jar46yGCwJnqxIcJLglkXUYD1GDSy
V/PSM7BNtDes+EYb8+av6Y4YxPT02NGbECtBVXAK3XF7KAmrc/MTKIjZmWB4LR3VEZs+4wMfDw67jUZCgE2839DDLOX/kTTiMX76jHd+MqYqZAQDgqihOgppEAjmswYkCgcdnHVqSACYdq4Pdx4jFWCA7t4jlVwwgu37kvhHBneEoITrg5PjeF80J/m1v7mCNhzpYuoZkiBkWJMQvp0KOR0tcFuShjAeQobBYxLdljY4GErw9EyEadzYjoLIVWw6y7t4ImzCqiB3tCLJlnx1gwXCUBWDZ8NmWrhI4LMC361FCwLYeyUILLqjQ7nPyjwgEg85bP3ieqNt2mK36spnXp0EF8BPXsXQRD+9HsgcBoCCsWm0o+538IyIAc+1wFCMDThtwwJXCaiPQKMt1Thn15HYhUp115fGRDd+IUAxTV9IzQ6oRAwwCOAKBs6cDEMjWztEwDcOMRLJg0QegFtTFckhGChGj0CX4/5GI9gP1G+t+d9sYRVHFdOHOOhMOzBNSjyQpZuWN9/110LDR041omBNBYIkUr8gQRBmw+PxLpxURA4srC2DfX6pqSjZcGnAS7aQrsIuIwF6YajOgCcuYJ6QsS4KzK/xCdIAGKHpFjqsO9Hm0QR7I1CsqBhQOHZo3hkUN2JyIpEEQ6ooaDcTCTR9vPn6M6vvLUyKi+taHrs4fUXVvXunoCaYeGw4XPFz88ItH/PKXcDcp1zv17L4m3EeE4UrYSzxYNgUxzqhAJazjy619T+ilhd+bUXtj9SIWjLhxqsqhbduOrXhgQd1jH3zQfMStkFjTNxEVFRmTZ/36qbySyvm6HtWioW5vR3FG8W6SAIJs31JbZTpvQAtvQ12MBEEeBgRviUcGtuEP6lltcfschBLuhd0VkhDE0HIk0yypHlM4d/nim781Y/abN6386MhOh0aC2NeGKjB9/sKXcoeNuh9ugTW44eGOofO2Y8LV+C01yQQSZbqlhmEJGOZcIoBSP66DskWA1YvLol+C/94CLh8WY7CRYRkqG7309RmLJk0agWespCkpEZMefu0BNS3j9mhP0HLTIoCcJ0dtGfmRdLDeaoOCKFvtskx94nXedrCdH0mhYAPqAAH8PIJXLijDJi4zJ238b++vubuvrp5EDB5ZWzq4ZNSDOicB7bsBxh1HkMl1ZD1RRnOJxDhluQ8RxfO+kHDwFgkYDUSGFRlV1fn3VFS4T91k0pOIktqpt8M1Ph/xOZ1HOZmjTkAkx/tg1/jJtT878X7cE/IZ7sRhl4YJnveARfjFpYARIB9ABhGi62xwQXr1gnkTx2I3r+RFhDK47KIb8Ex+Oo4KgDCEgyinDXmG3URRpAg7drvDc0XR8OyowwV7kACPEUBRQLlMCpRjulpemjPFYcoWXUTcfNfcIXCbW2nEdMAgHHOC8wKD+GXQyfoIHVSN26Yy2U2wY7sqCrBhY+HuroM4WkFhThXQASUCDWUeGVhH9VgHR0+UXVyTXyusuP+6iBg+YfoFphErEhMrLwPhfN8AvcERQAEY7PBwlmxjjRiQtwk9ZNad1PQMdrxp7xvQooydOPwq1hOGIoEmQiC3CbHq+PLIqKytzh3sturxYCarsGwMeGWtPeEgOo4/fKasMq8RYSBqcIZRBxK1Cdndz36CLTS5XasXTG7cjtNhjIZYOFTfUvfHRS88ceX1EO5VfUcDEiKRFI0WRwJKqdMuyq6IMPTIaJoR4ZIAYpOAgK3Z485bMgdNZcyRFJJRgjLKvI7LWJR0JF2u5/AWSdCyBh5rXFv3/aoqpt1xZ82TrCcCO1wJaH9lZqbfe9tITyI8dpZmqU2Ey1Fv522AFhhbhtnFx9ZqIB2qMETh6RIcuEVOHANO/1KdgwMwocJ7jWBd4ydvz49sf2vX5sPzFmelsWoWBhLsqwWMRWU+CTgeHFTmbSYbNTKnzGkfZRcR4GAeAcE80WH3DLp1cHAVwAeYqSgHI8HO/d37t50ES3jdhLt1dBjdAz0kCn6xBy0
XLIqk4KuQcChi7u1pP76iY9PitXdOzR/04vJ7lmSmabfiRskLqKgDi3wczK0DB4IUCetFwn7iXxcR0JwGTicQgGQkAgYtR52QDf5WCnaja5rWr3z6yJqVdYwdOeP3D7OmDil8pO6W+d/4ZvEv4N6iDF6QoANwOIDyCLBI8IgGzrhpej70dREB+KJ9RwH6IN1g2YQYTMsYoB+rX/PozqVPPQla7PlnJ99wQdn4azQzrRioFZsgbOgr8YmLwXIALwwzfcK4oqLMgqwL4ZVZAesSrw9tApAMF3giRyKK9NBXEX8uDzyIMDtp9iknYryjQEQPXNaM5i2r5+1755mFLzw7acqc2Zc+k5GbVQ1vdFyDJq3gJzsHgB6Y/ZP4MEcGbc06RQS1JUSBpS8TBXrw4DDoNb6LCGC42V4K0JH/0KyjBGXezl0TZTWQxjqb9/4ZSVj68rRbZ9w19lXW0ZvOuvAan0JKcDaFGZUBp9wX7arwOsHwfDbhIkIJBHZysBZoHgUSIUgMJiIEc0UNHK7/cNFjd91YPhpIeJG19aSnAB+twC9GDM0e2raI4Dm1Y53UJpftvpKdpH1NtutAaI+Xby4igscON3LbOBgcMmDujF0nnMNoaD+48012cFvHLxfP+RPrjg70GshVJxOQ1HEHAQT6dPVxLMSjKCf/tmS/JxGuDdXhTe/shTlu4U+abdDepKBt3Ce07tz+7viK/EFjLhs2hfXqLsyJFWgLZw+dc2yGeJ2jXt4ZOvU5QIe+lw7ahWfKHSfCjZvqe/E+xZVcRLzzxsKj4e6OXfgmF6MhMQpkGZsgYuAxfkvDh01anlnKYuYQ1wgJFRYJsrNeQL82SbTBchCMhMEj4u1bO98Hd1DJlVxEgIbZdXDnKgV2g0iCc2l4yHCt02KqobmeVyeMxmfPw0GqTykakgC1+2J0WAcn2BoPvtdgSiyy8O3WtxN8kgQvItjR+rqluKsj0M6owHpBEg4KZdgj6BBAkl2piLoOAmzHHfUEwqlv1ycByvXBliuSQB/rVJMFu2JrXl7W0iA5llD0JKJ7z+aGSE9wCa5/Dhgw2qSIM6klY3MS/DgMzTbmX2sJWETJs5tAkkSMPBYRQwTmaOxfy1qfAyfQEc/kSQRq1r/19K8CmTl8T8FJ4BPOzwlIS/zHkwiMAhzTw1GbEAQpAyVdqnPICWQ6+1p9iAwaF31Ogwc5wdi7P16we4UnA1ZlUiI6d6xvOrxl9Wy49T2kammAyYoKpMBaGlBwRwTNQsKsyI4SQMqpDXJPklCPdKwyAZXr7b6WXQSYrcENqrF91tyG+SBBQ/Lk2kfIql/9+8n1xxo214688ruP5Awtn2bGjBJ7iQBgvJNUAxnwpQzQjonPGjriiAicGSKIyhwM1MP6ZRl4GwImoChsOMvYHxtxqaIuypij7CjTIxZVaalv6H73wSd2LPjPutBRUO4z9UkE9uzcsbrpix2r5+UUVg3R0o0RhmImvihRFIN17T2coRUV2mBdhKCzOCHWbHFQUM5U2OZPWx//ycMb3+vTS0cjTIYyIMtM1zT87ART4t4FLmNRRdEPrtnck/jZjlD2/NsvEdQreLwBX6ji4ZkMFT+GwBDGA2fPAk0RwImQCaGZNNv0iLEvlgk3AimkTMNUQiwU2bKFnUhBPWUVi9GU9ZMqXnVF8ZVr3p+1Dr4PsmbfCZoiQiIJCVONELyXg28C8DW/HDFWf04gnSOwL6yJHLZv8nc+v+bjrZ2dSR06zYaUIyI1u5bDCYD6iYyYmc2CerYgwUES2eG5RQwvq/l6LMXnG6k57n5Ul2I/bzWcPVwatBxcAGSgGCEyOCpbuQkPYfCzBx4RECwKXLmIGNNxUvD25rRqfYwIdBpJoPDuB6iLJEnfABIyKxnLvQ7sATGdsAUIwaZQSfHu/rQoEMo+EoE+SyTwmaRZpnqQiSiKhgQ9aDfgHJMxkrHSP8B/cuQKL3O/zdh+2Ar07j5rZKR0phbe9P1XiQEyTgS
eJygyrDIRRCTYehgFSBLqWTkSMejaOAk4rJYnooPbBTmgmtHskziIb8k3IuC+5Cjc4XXFo8IC5wRqkyQTgIQQGVjv9QWMhRu+3O1oix7/9FPW7RsLYMg3ItZsamkKdfV+jA9A7PC3Z9oJ1IskJADq8UYPHxtEW+M4oy3wABfqFFjJsG1uaOh6SwwSVznTkp/fM5vlRemNY2uL7mChKOw+pfNBfxssXBZ0zsC50dvgs5+NECRd8H3UFsZa/gLnh/1wAs1k8N3U1vse3f+zPYf5298zxW/3921DRRaXvnDF1BkzR7zKOnvPY/Bxuh0dNlCceSCJg7fKdO4gI5jj8jBp6wyRMBBI6DG23Tz3y5kr1p3aJav6UfYzIrg/S5Yf2rNv14nl064dMjwwUBvN4FaEwT+yCEKkKCFiKBqcaBRwDfcOmfDgKyc9uK0x+Pycnzf+aNWGU4ecqn7IvkeE7NQ/fn/xpMtrcn94SU3uJPigo4yTAV8Esh4iBiNGSnAixM/gRIKw0dTG+vpTK5568cDLry1rb5Q0fS+eVSLI23GVrGDOzGE15WXZV5yXrVw2cdygElg2+dAOn+Hix6uwU9TUUydOhNs31HfvZhrb+M83Wzd/tjvUeOgQw9dcZz39T4jwQmF2rh3MdC0brgQqM1T4TunUKaVosq+XRK9xz9WdYyA1Bs7m0pBty+VUPMPLCyW5THW+56frIDlA/TDHA0/1XmWqk3OnDRkolpMdeImhNrmM9mQbKJ92QgdTSQQEASc7cE/i1YZ9ZaLIljwuAaScgGLuPKxrr6ue9MiGbL/fcn9EOIERWMxTObA/9SEyMMdEY9NsOsETYMxTObA/9SFSME8p/ReuClWyHxZNZAAAAABJRU5ErkJggg==", - "type": "sample", - "tags": [ - "Python" - ], - "projectType": "python", - "language": "python", - "git": { - "remotes": { - "origin": "https://github.com/elsony/devfile-sample-python-basic.git" - } - } - } -] -` diff --git a/pkg/devfile/sample.go b/pkg/devfile/sample.go index 5f248136dc0..15ffdce22e0 100644 --- a/pkg/devfile/sample.go +++ b/pkg/devfile/sample.go @@ -1,15 +1,29 @@ package devfile -import "fmt" +import ( + "encoding/json" + "fmt" + indexSchema "github.com/devfile/registry-support/index/generator/schema" + registryLibrary "github.com/devfile/registry-support/registry-library/library" +) -const DEVFILE_REGISTRY_PLACEHOLDER_URL = "sample-placeholder" +const DEVFILE_REGISTRY_URL = "https://registry.devfile.io" +const DEVFILE_STAGING_REGISTRY_URL = "https://registry.stage.devfile.io" +const ODC_TELEMETRY_CLIENT_NAME = "openshift-console" // GetRegistrySamples returns the list of samples, more specifically // it gets the content of the index (index.json) of the specified registry. 
// This is based on https://github.com/devfile/registry-support/blob/master/registry-library/library/library.go#L61 func GetRegistrySamples(registry string) ([]byte, error) { - if registry == DEVFILE_REGISTRY_PLACEHOLDER_URL { - return []byte(SamplePlaceholderJSON), nil + if registry == DEVFILE_REGISTRY_URL || registry == DEVFILE_STAGING_REGISTRY_URL { + // set registryOption with `user=openshift-console` for registry telemetry tracking + registryOption := registryLibrary.RegistryOptions{User: ODC_TELEMETRY_CLIENT_NAME} + + devfileIndex, err := registryLibrary.GetRegistryIndex(registry, registryOption, indexSchema.SampleDevfileType) + if err != nil { + return nil, err + } + return json.Marshal(devfileIndex) } else { return nil, fmt.Errorf("registry %s is invalid", registry) } diff --git a/pkg/devfile/sample_test.go b/pkg/devfile/sample_test.go index 4a66f4e47c1..f3ae43a5741 100644 --- a/pkg/devfile/sample_test.go +++ b/pkg/devfile/sample_test.go @@ -10,13 +10,13 @@ import ( func TestGetRegistrySamples(t *testing.T) { - nodejsBase64Image := 
"UklGRloqAABXRUJQVlA4TE0qAAAv/8A/ECrN7/8vd7Ur//8/dmx/v33cnwf5U+7n7r3W1aurV3E+uLvLRbNwuybduA2dUdPRkxzt6BqskyykwV2aw+ycM8L1l5VsdGFXB8W9uNycUFw3Dvezko1TnI3LzBlpk5uVnFFxd0auDe7QMHItcvzCmjPE3Sk6Orqxqe4Et/sUdxi6XJPiTnF3h40Uu5MOcXe3Pdq4LNxGZ9ZZcXd3W9jsaDc+0uLeLNxd96CkuHtxd3c29iRrdDUnC/cmZ4gXd5ixkkXyRZpwJ2XmMjoyJFnIhR5dScNZycK9OD2PBEmS5LZ5+YGtHCwAHmE/GUIbSZLkJJX/Klw1cxJo2zbtZtz37rdi27aT2o5ts7Zt27bb2LZtmz8/zteFRdtWlWqdHmR6tZvBYXNQ/MW1wWtcgIITeH0oGAUyWHMMCScAANQFIxEZ0o4WAgBmKKCZZQYAlyLFh+s2E8+pJLUxHZQ6UibpBRPHmAKgwptTuDAGoM4AlgtXAmf2DPeis30Onek5dPpr0Wn3oujprkVnvBed9X3ozK9DhICqkrSfIg6w8GYzv1kASNu5KroTpxqI4VNxsxw+ScAIKAISn8Uj8Rh0+nuRzYxwHaxbfwTwCpCwZhEuHOAYUv2LVbn4VDyOkErICB0lopQoQjjMxTvNCZ3lfYirgdgFTI40ezA4wCV+RRyeyRJsBFzCQMlxTClPOAKCFzvyI0iGzd46OvsBJpoybEvSEU9JSFNOICw+Fd+sUN1iRIGZ5gsqwMEBZtPlmK05VxRJKGFNsYRi5cTmDHLCgSECa75WQtIl/TzdXlQd6JPAFU1mUYTiDJxmQyZXCddqoHe0eUqxREjXuJZMKsFGvZxWq/lKljLVVuIjodoMrWN9GaAKo+q360D5QCwlDTDlirux7yyvQ8ws1SkqCkhgqDmBA9YAzjqqEjPK4bri+tJAU4GoZM1+SBwPYGak2aAXoNLk+nKavajK8LgEwhGGSDVwho7yMc4oJ43LHCDBZmMlXAzmImUhRQim65W1+pirZGM44TIPCSlg0vhZ5mAAE8DpsesGGnC1ZLeWJapj1tKyN1qeeANiM2Nccee/ACzvN3Z2rQFUK0kFiWh+SpSloZwNZIzCP/UJRWXAFzeQcOYocXe3pqiquDYBaxPG7mSTVEnvC2d8FlUGxVKaUm6uNcGa4uqVM70OjWXF1bBdypeOgWqJ84OjBbQZtpMNUKikM6S5lCLxlfhKS+Zq5cTmTLL2s8Q4BJIVKbFaSi6J5Wm5ScuYyVm4IIIBGPJCLNAGcLwpHXYt54traXKyh48mhOsplywAgMqZaLFr5UCU0rJOp7xoMz2MP9A7b7isAYQtV/bKnnKHeNpSS/lSN5FGsgTW9mJ5FygkznEOp+hm01oSSslSOZpg5VLZAVzQWFkAGEXmVW1lrkWUFz4V+OJmasU/z7gDvQeP65dwlCnvCt35WU3rsJSMEfyVGuclR8FQZSkqbB47IDHHmOmik/REaVqJm1HFU66BIWO+K/ReyqxiisxfnlTiBVo2VGVUkcPmOvSHcHDZGJnYAqjcSC3FWk6JCjQslZaW8ZUa12uKErAaJo8eAogwrgFbkoGmPNoXJ1moL0vNG+NSVBZUad2lfJ8EliaXeozCiyZIOSQ7YGtEixJ9t5ibA+bGnPiuhkS6nizF+k9lCKwZoKfBkWM9lcT4uFha5ggkSNZsTifHF+id0Fo9uVek+GosdxwvRZMslTtWltoytNr1P0BdETgHKZNFybhapiol6qif4swR5xga0KW5ejoJfEuqxuKpB8q7omWiJUHlOLLUkFFBAmsA4UjVUCyUHQkc0VLHmK10n+aEiE8c9QaWH+vzoCMC1xHWdDHYsERZDZGl2iYMCQICjAmTWI1llsTTthI3E/2lXEWdwMKuhtfRGcXX1rPSPihuoFmWYj5yqRrSOhYAKini/tgv3RgW8ZSWlJ+M21x9XMEAg3XqU09
nOVw/M8rFDSTwtMlSToo1a0Cy1NAQaAYn7aEaK0pOk/dsKZGjS+ZVUV2hYumWZ86vA/eR1GfHc5ZYlpa822qvL0utjhiIp7EAUG3jqkm1DgQSBKLlvjRQjTI/WDrAjK6lF4lFXONaNo9mRZPj5sms0chSawDHgPUvdiQ5pUlWU0r8cqtJVJA21nOy9bRJgKPP/IvFKkviKs2yFOUhDSmDkKUmgT/HuXXAXNoQSYef6qUlnRSba5gUDfQv616ILQCkHUx9sZBUEqSNTJbqXAeYKMqQLWVLAk2VRhnvMDNUt1nijbSDsLAunGrAj0iZKjo5T3ylXZZq6gEbo23AzWMqzFaC64myNC011h6Lcf1/U0BbI+7ShDzxsrYyTgjE1y5LcX5ytIG2Ju1kk3SIH3uqSW0NaZYlbr6QImo5249dSeN+MsaOyEtEd2tzRH2RliWTa5HrEsNosrLUFsAlQrp6VebshrbCIRAnKTYzzuylc3HsXG9k4bYN+J5Mrx1vdyTQJEulk01XllrfIQzgCn0QdryiNO0SbiBz2YTL9YgiDfRGhHrJMClXrsocaxGxtIkY+aYpS420AZy1rGIzmnG1KsZTuViVksJlFjFSURtApUv8jh1pH5TANQpZagugaHC+/PLuJJVAq6SqMoUiVzOVCzBzMGKaLev7dYWcL4FrBLLUGsBiMBeI+VTHdKBpJY44GadIYX8ZMkTMR9S/oWsbfA7O2sc1WfQqrvheU5eluraB78l024tlT1tjiiNzw4n4N4zEn8EC8LhZzxYT2XEJlDRlWWpiAXjhRH4impsSFWjzBTKlFVIWYxOhvsAMwLL5I4lo3pOgCctSawCXCte7wkp+TOu3r9prNlc7lxewFaH7/3wvQOVLdNuLOSWB1TRlqf3AHWVOc80Wg2xalK/F0/DGnSJplnWKGyrq1oJ+RKxtAwydc5GzaE5lRVTQMFkqghvSuDrsWrurtRV1KrNSoLxnqQKrEV/isbQ+Y6maX2qCstQqwLHlyloptc9K2tK0ktlcNEEUECuA3oNNIYSHtZlTnSokAwmcpiRLLQAsReblYmJ4Q6uo6FUWbU4PJxDY3kYTsO0hsMW4dl9EUXJpCVRTkaX6J0CQiGNfpOgPaMrz00rS0x2pZ8V1kYsJzDShSB5GmvNYEtNHxVHSFGSpg8c2pDEtxZ6cttp14MpYvhql+kqlBfQ2MY37sonMlT25MVFNQJaaAbgsuNKqpcqg+JYmtz9IHjaZMmIdsHawiVV8JgGKF1WHvZj0RVmRLUvNAI+b9UQS2XFxNHzFnhK3HLc5g1zhAIM1wap/P68B4BWNOfdoUkvToikRUpZq5Ajergl8J86BY/x9yTsSaPpwMk0IzAPEDR6iQG+Tberg0uD6Gq3mOsQNtMtSXNpAWyNP5Thy6lOFsivK0Sarlaslrj8s/aYcKH9wCyBtYFWYrZcWiKVRlpqtJ0vRCuxvRDeTyohJqsYq2sJBAiVBuTXFVcW1BVho0u0fy/04498X4IoljdpSDsQNNMtSJD+gt9F2a8qZOXOjRTTVri3xymI//g9w6jUAy9sG0IGNJEU8TCc2NrRps6645WiCE8+RbpzbAt4GVYYtA2lRnpaiXaaHzSxzj0rSINo/2wAqcyatu5Tp05SwlGRzRdIAYYq1xvg0JCWq1iKLO6WtiuHJVOYYWY1JYEyMo4cMCWwBjAfTbtdynqhAy80jcWMUDsVU/+QQWNKkjgRpWURb4FB6oJBiNXA5Gkvr//LXRl0ozBliLjU4rSmP8Hw5b4wyQJFBv851TJz6DL9Mk+FKWmkq0NulSDX5Z08liNFtw+nHSqgzn6PV9iUtToinhMItEkm6q+rgenC2z6E/KcrRkt9uDCeE8pQkZ0Sdw0Z6Aco6qjKztaylDLYCt93kCtTX/1gFZX10n/KVhvx2KV/qpkrnWBpV7NN6F3DHOSO2VDT4iEpa9jxSDgX6Gqm8Oizh7urYpWpFrlYuD2Bhp4H1b+F
IcO6NJbLj4W9pK2feRz2f2vB7V+aUG/bvuMmgyMWhOk4hY8LQgsBZEKCYUKVUS7mwrqJypldI2dDViKyVcVHh8tuN8ZUa8/r3AasG2NGDy5XVWqxVAvFV6Fuca4nQwIRu2w5C70lJOgij5vSVowmqXCp7YGvCAHv47QUbMtfJv3mW16HZuXRIV1ypuRUmGKt6QYKg7E7lQ67LF8nUbKaLyw+YPGiYIf8sFdKH1mo2LSEDVnZvkh7qxzI4H4oDEvLutySYedI5ggYMGWqXD641r69bQuac7vQKKw36WR3Yd41l0yFvXpX0n1Ez2qNCbO8HuI7XQr6tOkqkJhJBPygUSv/hKSvUfbpKegVQwV0wWKNA7DgWo4wXpiSEDbayhllU/WDRWezCoIRikbRIkjHmHqD3lGuyMCuhngoM+wT9uFEEpyf00oXzvBrAiKlrmCxOjxUKp4eKQ8XQjxMYTkGF2WIqZhNS0vYPMfhJD3Oray8dfwAAHb9g51DnzMWdtDo0urOR+BE9IWe0Cifr+XByjwr50PNibn0TERFPaphPs7q2v5FRPXpyQlhOPkF3OufDOXJ6VPTqF2q8FucS5RhxbWWCSIcfL+sx847ynfnLxFMlEP/fBec78Z7qGddN1hnWbsabaz2VHkmUWwgAGD20bpBsAyQ6h0FinFDfXhLBavhtdQgA7wCX0j0gnWX94NRypsuF7s1zfB9kF83EsWYW7bN/HnQni7Habso0p+ErUN2gCuZY1Mv9FoYM8YFZSUogTXI4JM7J9Y2LTSywsn92gzz+g5MAXiOznXKHVMm1EN00Sz3xYI7DPZAvt5fLc7nyQK5yaZfWvi+I95QSJ654rKF3QTlMWPEMBLBlfJAuRc2enkJPPSv0yDX/9yKqm1hFQ3oacQNJW4g3zHBpM1YbfFL5pbRylR8yqENJYCnXSS9ldmel1bSFqolyg8sOwMIuI4MKgLh0J5dIPN+zVH1zj0YpTRQCtLIAcEm+lmdRKrbu2z3spi2xRLNZrnK8wXJeSnaNU8IKf7dAm5E9sGjnqU2H7H+hrJ5/des1i3vgiHDOco0WY7OVPqUcJaLDEUnSQUd7UDU5vaRDNwp7O42L//3uamk/tOhTuAIKQxuTABFKNWAXBjYC5SrRy5RYHoefidqkbpI/sGpY3PovUgjz+1FaPZ5JUNSINDP6pGYDUUr0NEuJWhqIJijJjCoWjAoSo95WN3i7d9a1ACy3c5sDJIH4SnQ35SsZbzevhuOKiVFj4gRdIAGA6nS18KQCCSxpDFOBWE+qcITrFDBq1IwA98kemxNPv6nauq/32VQPgF0GzQQ4t229Z9MWXjVwtvchzn2MGjO94DqQGk6LJY1rljiZ4jVgy4hZxS1ulSUJtBWrgXLc9OxS38bc0bmxvn0dnu94jtZDtXmyFJSodmDSeBlBXVic3j0t4mnpoOn35XbPqXitEC3FzmmlPdFCrW4gIQnyT2q8z7e0zOrJ9OE/wyWLnYZLL96lvTvsfzHwlXc0L4cTJyZ6kpT5K+H0EM0v9DpTKqiqmbo7QdVDmuDMkpYkam+utEr7tOtJ2EQgxxdZX9BvtOwHYdIR9tAayher4nfbQvT92hfHhFFt4jKmyHLROWSCwGvwHStsRzBag9cBbyMV+W0x4oTWvec4c+3/bkQqMSR2t+RVWBdNKX+jxGxGr8HSBc6nVD7MTMoVEj+dWqQUso5SWREUaDYSjWR7azn5UlwsW+ESlgxXqYqNdvIEKFqcpXiY2G1HJHMk/iU41vXzZAZ2DU2EzqR2dfV+xfoO9i9gnrAWzbnBcF3xlcPwq98Sk4bKFjin7HxahSk/8+YLZdkC2FpukJS1AICkzfq7Ep8OE1ukpJy4Xhg12Fm4Ko9shAtgLJ/pdeiR0BxE77YOYuYawIqR2rSEzbL6CGOsGyij+GSVeGiNwZNyghQPvdp1tvtB2pat9YW+OapiM+exYKBwIDihieEwLkff4QfIksCWjvH2hHv
UDcLM0xLjVAEGyho4DzYvMIzzlDGJ/TgE/ewQmKupfOgn5cUHOPqYN9LJpNSVaT/00jWq2Y+LER3ZBWrgnY8Nh765uV/KROCQgXIbmeY9Yyokc9WLhr72PnBl1eEw8ZA5+wNiyEB5NcxEYUxCUraZ5+iCzkZVXt0I/bHGY0Q5zQEYJvPgSHHN1dzQXODZvw9iXcFX1JctsKLMpKVCeq09XNMUMUwYJROgKHysWjo0T+pOUi7iJH2ZB0XuamqhXwYyzpih0zgfOPKc+ZoXmuM3mdtYg963r2BmQlc+9kUZJ6waJXvxNrhmw71Imly/sEtnviKY/amMUqFLbk40rtI4Z/lLf611LDSzhT9DEsGyvqyDsIlPj6VVfbOUw1rapJzEVzRO6lr4HUtLoTfLyZpUl9C1X+ckNTir6U7GC8dt1y3USiuUdeg3UL+LSUxkw7yRlgKzG9i/Nqoj/bgYpuTFXM2LObmevZjzvDtWPysWB2GYLODJbR4IJx9PSZXqJ5U+gJ06nn1tAmAxn/4J9e3Wn/C/WUQAxkkvmMDu4TDakRKvw6xQ/aK4UYkCANrWdIkrCe+o9Rspo/jiVMt1ICcMgXgZMavPg/OLs+dR3VHUs6H3sbpf5/hug9VdOEXVFg1dRNOZbME8y/tQQOqnJJAukzy5TF4SjmOrVznTubP5YRLMgWJF+Rr6M7n7ynk5nNiMDXONsfKJF5yjjBvHmCISQktZex9rXfPNCDtBkjtXbVApTd2YOcL0YDvBXZJC1d4sDdx3qrqXQHWHiGN2MeYk8R0Dor7tmmnrH20eXqyBdNM+XnOboy+O788O56i4++KFc4aaFQ9nTyaaSMm3geoIldcbYCRmjvtTXDZ+RkElSRlpnZNAtFugRClval9+d2Wf1Eorm5uxfScT9e+F6yoniGL1VevvgRe3bvCztIHkl8hYOrQ6KyXqnNY3fOC8Y1IrmSeuBFTj/4DqMRNJMt8RSr1t7zRysB+kl5sXJeLpFWCgxHUc1Zc8MHzOHGnT9Cm9XN+Zvb+LznAAvUPGzU6cz/sDkDLsi3LFUqKj+eJ6al/LgYocPsvrUKqPaGBuMWu4JQD9Q0YNOvFWmezNcof4ge7BJkqUM5VMSslu5TSzzt9MYLvfoMF+8IA/345nrMaJOnMl8PtyTslmLT9Dij2wSgHGDPqBLzCeaB9XgdsICRElSo1V4mYP1R+OEdBpzGAvHQdAMrnoRKxy1HMDJY00wcoE5lHiyo5AzBgzQC/A8mHqE91++5TjSePMHohbEfuWsHTRa9BgGaBZSPgQ+TU7au3OKl/cRnjLTUv60s74LGJmbyV6DRpgEgDFhnKf0tlqxxZbdmeV5Uqg9M+EK62LpJ2YMWqAVQAf9d9cf1Yls7SyeaQmuSc1t08pX4kX6HnLF/Zx2WPNsAH6FwDUDUDSYAVSPWJKzhM3NxPRmixVDrRPkwRu4DqBOEqHIGwplzgDjCT2GjcA+lcBAM+OtYoTceOokqjab2r2dHvRYzz750Gp7tPuRYVFYQTJ3bnsWGAFjqQb5otdoM36jH5DB8DOmVXUMyriRT5KkutbvEbKpzdBauWwiWUpFc/+fVCqGq3JxoHKuHK0++mWcodLnI3oNXgAYGT7UKj4ma/LYRFaXCvfMLGb6hrxmcl5s4/Gqp5YjPVIrjKWtrT9EJXkV2YNcrJOi3ZOXtxkyFyEBolKlmVP+FPOUX282POIaZcWjw4MBlrKJitt9RAO6DI4GsHmdw2tXdz74BVAfdtxHeCIUzlQxVBec2opS4liYSOn4eQ2gVO2/zLWDI7GtNHtmZNGAACfhkqDFfR4WCPdCVfD6QxVR5To2DFIMEZGdoZhWc/dsOuXbAMAS5TwZFJqifElsbywfbEYC3QaIXuBEx78CfXtlKd/pTsaDDv13gUXlgFQbLlSVuJ9Eka5d8omVRQOGSAjIOicrzeVavrk+nbuq/mbVM2UlY1wxpS9awcBInqpNiYqXPQGcx9rBkg
XmGfX/P+LpCdEg3k8RYwR0hiB/jbTD45TPJxqqcYTTLwRTt6LW3aueNY5bsBE2qJash9CIx3QhIsAzmUz44VmLkZV0WWAjIJKhWpx0QlCFZfH26SnaGsUJsAlw+HUwjwtRTldHJIhQiVHNVdzQm5QS5WTjUYzVsbKdGg6Wh8/iWaAjIARCbNJ1lKBM/5j0ThNquvgem4nQzNWYKZejSH6HSwKV/9pNsQlCF1cWjXKauxvFCbBubN5IDT7Ch+Nim6IXECDUZXH5lQo3LL9cLHWSHDuh6OjlTREohohdQBQ/TYz6TBB16RSoFH4NCC9L2bCvNhz4QzRCFkA17UTn5QV8sH3aoQtlhsnQSmoHg2Jl60y+TBCVnETq6FfSJBMfUz0NgIjuEgOu7AkIa2cYj5h1BD9bkavHA9Cv2GJMJZY0J81POJq1g/9cPwm5ywepiHWO1eZhtJYmG5z491MEbCrEfyu35jIhv5kztiee4AtI2QNnLt2zlUhX0rOpLqLoXmdYRFwe45M+aEdGK/GmeMoYt4IGQJrTeuU5YduPLOWuh8ndH7zNoJQJyZaOyT0i/EVIhcwSN3pfM/BSLWxYRX6bXGsFa57QNuQbuxvA0v9FsXGQ98tlSl+AfQaI6ug2l3MSZg/Il6fyRRfNjCxOq/HSmb2A4wjazxG4UlahX5nkWueSw17jZEJ3HxK/+GpMDFXaXErVZ95xWUIAIfWuEAD2NV1cWQAgM9J+VZqbRcvTOadS5E+Y8goX2yB2FtMhnkSK5CspDZYid/zrh6nUnVxJ83M7O/q3HVw5ODE+nLnQu/FHdpGPWPEKG6kv0tmx6BYoae5vrdIssayUQLUBTK11bJYWsagYiZOoWSQzlM5vkguMrTaSc+ZtJbqIOf3pVZNyXniKQn9fIFF1kd0wTBZALOqQBEqCcI2HrpWcq62shm9Kq4+Jofrw/XkOkAEEk6c9VR2b5cw4NJltC+CYnA7L5tZzdrG8SRiSWcpT5ik10qZrdlmYTqT9vxwe63MFn4doYx14wRbYF2wd4urNJy+THlzx48tHu5O/aPH2B2tOdnzXh6zfHlPKtcRb42ljjkSZaJU8/IDY2mx0mGPP+SUTapwbMFAAUAk2K9S0xncxBLfcWezSRI3tzHri8TrmTjpjuTxlexg2nc0nQDIl92b9xIjMFS2wVA5lamX1YA4G98XNwg8RwWuctOBrywvcMTXuo2evKzUO7hO2GusoAsUCaKueHnq2ERjmy9yeSap4YljGwYLJkESIxVuZpZEBY2LcsQ/YDNVFEm0wXDBKuomAten1GK7L77VuEdeOxrY94xEwyQMGKwDv/spJ8pjoizVWClLlg7EWqkuAJMwZLB3BCwDSq4ZHVhS4qtGILDSzqXVbKaJWQt0waAB2gBOyMMrRjPDgSs6JzzlOn35mn1JxFmCjDbAuME2sAM61UFOt9nt/skpX4nSb4pQhFY5G7V/3F+mqAC9MHSANaAu1FtBypfHWBISL5t2PFGq4d0O0kFffrpmrhC9zCUqVWByBEYPJmYAUFl/SaJx0V7pUZVki+cHjrhK47KWOzZ+/LRE7e7zsH5TeZIYwMwEYPwA6AVALYFky5xgZb7SwmaxVJPZ8qXlhluW9qm06/mBChzX7diX3Rg4PtkXL1Q3E31EO+n1vyBkAaztAiKYd3ty6PNuWD3nPiWSAfp7AYAiTrWB6+i95NRwOB0l+8TUSixaqNUW4/VssVaIxlbME+3oBjN+e28DEX4xO0QCMLEGILI5peCFzOp6TuEwIxsAy70AgE/8k0gWf51q//X/z6Q0TgWni2qCNUOZvyrO8u5c7ryvkmuBa440RdXL1HBlcL6wblHt43LmMmKYAID+Y+dvFAon33odjxx97p7Qwk7hi9z6iAfArq73sR6iWfEOUNF+NkWRS5OjS2VwO2/nX6XSedVU0qdQkVdDZNRfsR86Wp1hsmZCn3dF9TxVHR84dM5UYSkU/qkGIs2TJLAc+QA
AJpZnDq1qkx53CKy9j7Yh6G1XegNCnz1otvCYX5J+cMic3sNLod4INqqkI0BbhKPFL504OD+6vk6A0YOjaCQ7BJCGWkmCkC3TrczQDYV+9kvCNOupuHAuEXRgqFFpgiefYwNAE4G4WgsZpK3GSs/rfehGJ6he2ZWQ/zdRUnx4hD/QdtAw2bsFUFyJFlOCkJnH8BmfQ88burEGrrDEuKtCn2Q6H01w8jj2wNqIMdIGvF1OSnep7Id05j0Sd5PrLnr1YhcIlbu96DsiIRPT2ZUa6Q1HCVjVqdQKf94FgtGkmARYUvdpLDE9HlrWUX5L9V3pOAyDgyD+mDkJwmgc1kBQ5HCY428VPIFtPbyWHi9MVannCl5SE2KoH6f9+QEfgBm1ZSAd5pQGgb94tx8Ldun41ZE27FkKJwcEStxMrfj9nose55knMet5LWHO23brm9RKSJvundla9sN10rFUJvG8Ma/n9uOPmQNiaTjrcLLUTUnjMm/oGH9Gcbk/ruaEj4lrKvQCLG3W9z3V3JgoXyQ0LoWX4HhiQUeWwRiV44PiaOjkN92SsDiPODINW0c//mG7pEOz1MoZ4lCbBvsBKsbNu69nfh1aapHwjRqWjMfuFQ2od2QP65B9vB9eDLGU+O1ic02Q9tU1ENtdmtkClV/3dBgGo6w2FrFJnHC3DWD5E932Yt7RMPoxpZzyCslF/wBH0rfNyxOxJGzi2INEFVJcdZwdDRhU0RqYN/ZAmE89XCWV7BBoGueaZWwvOhHNu5qGcOrLq7S5vuoe3LQTF7CDRLa9W+t4HgYHYjHOfypDra74CD7sN+qZUqFpT1D+NYmyllA4doBe06JcTb2zDyQunMTCTuhsnaizZFb62T6HWsYl0HTuedmYMytct7jEgBlNh8WlOp9qD/dl/clNqgdYjWw6J7ADAnGYxC46A5aWqb4S9VATpMGnhMlGcN0BzvNE63kdSfuaDv9REZtqiBQFDG2H42Hij32zxdlwW5mxSXtxlZHMwRmAtetzFk99QhklKtDyH5Ls3CZXE0mtcbo3dAKEHzFmS97RNPwCR5zjD5tM+YMD9k6GiYi7ZR8jlpVwHotbIzaAhEUwCwDHkCq+Gsv3aeqWq1yZrcSOfOVfi4VG8mK6QJGmeuIkstpGnuEqGRuoRonvhDmAeQ5c3js6v3d1DaBSI9WttIftEuTXzk1IYSKSU1ziXLfLZsu4aBqWmS9+slB8XqRtwGRj1mBeFyu5GksOihWIhjJYsrOmUCVSub8nHNeeFif2vlfbw/9Oh1eoshGx1tUFEBGPz5aKtsGKBqLyXpFq6rWwgRgaacxOnwvAl+LUp1rzjniulh91QPDErBLDpK9Ue0jr3/nl3tXr8oOu8c2aBQpf0la4javYPx2rkTsELM5aSqnZWvHE07KUI/sqKzXSS4p844+RYWgCD5K058/YMuBq+m4t5bdnW1PF2of4cZz5CzzdtWgzmp0ST4Xf+R2SGZYjdSmW2os9ZjDTlVlxPE1t0t6lRRP3lsseOCkyhnTBJUl1L2n2HdW0DuWJ05fJe4u1WrzjAhmBhixaORvdXAUROtIOXgC4qKQzdynVMadE2yhVnUrc/hJMENC7M2JGOXQurn+lanJJ3EBrm5DleJbnSVo05E4V88FiNTJHHUR4crXbtZwnlqaVSMugeXmcqyxWJI1+bH4G4Nr6+M3DSd2DZSzJlog2bsmR6WRTLEnpK6X2DgksTUtNDcRiVH84epE2GKChLpzfesHz4xqy40lfLB0TSknZpArBVgSmnjPnsSRaNrQOgMtPFoqfhbI9EsfI0Ab8PebGVzeXhtMSKN2Q9gRVQeStpBM04cSeG1CUnCNKW1mb8Yqv9LXUCUboOJhmAEKX+RGtZqbEc/SaNrZ4gR8a25HlZK8Br4ZTkyokPVFK09SOuZUa6yWVfOSOC2VkBnjtlBIzOrAkntIBR9LDJhOONkQSWwCje+tiRwYGxQq0LZWJJkh5JLvIHhPM9jp
ACaXqsxcHPPFVw5fYV95k3UBXhMlq75u4+pTNsWGRQFMNy8vEbVIvKwDY2hnxVXwu1m/iLKSk4pKEnt8QAkuGx2yqh4ikGsdEJ/B+jzMCS0tbJaDDfPakK/8bWEbE2ypA0WTetxRr5T6lPQdWvqQv7fCRNx1R9zWAtIKUn4iWLW0rUTKWjMX+419pKqNimZ8BWIZcL66rfbjS4gWiJRFI4OV8mzTKcoyk+yTAUaJ6W1ipjGlK+UrcciHF1L4aYOtgUxrizrcjLjP95opUsp4T2htVSizVMhA341Svn3ME5f7bI6BBIo6fXFQVpamsdcSvBMV/zuyjE47+/U1qYCMzAMNigv7fkl2NZw9k+3zPs9JK+V4Hif+kjsarNmeJ851lhci5T/QCFBemuVjLaQvJUpYMb5zxObTIev64m+AQsEbXAPx9ymmm9EBis9pai4uIxBcLexJn/xzqflmcMsopjg7QuQsRwiGAZUKVXC0lj11K63CeE5/qdwAX1ySHurQKAK+dKozzlMkjBu72LxwYTI+VPyWH/f2e95P7+IwmgOWhCMrkOZKU+x2JjaymOTxL/GTcZnUzfsDaQTRR218/nydRSDKvnWNyKRTrG/MBSKrv+zoBABbmI0ZYWgeYaM6ILe2OxhF1ycaSeVWkyyQG0IWmbPMkGE7VW4igS6eZaAMe3MWarRcYaFrKVTKYPP29KEb8ZnQMYhBUO+f3b/d3du7c24Dq0nLjeBpPietrtFoZ06YoO+LkCimu2s8JrB6EAdryBDDapXfBBnDRiPPPKjU1p03tcsVNquKV/BQeONa3jTA1MglcPckIWNul84CWOb5XZi8OWOJqdDPHE/EPziUHzBhiag2gGLL++MOUXxwtPb3iSeB3vIlEtDyldTjPfjKa4Mp+ZgZ5USK7tgBGnvJowxQxPyDnzBPGKIHppShT5N7xYmJ8WNK+ppSTX7Qf3hsC2kaN8JJT1oGL5DrHNVWUdt8pL9rfj3DUxUMYGgXtIHPgaRe9jKtJU02LbIyZxCLVxUcG9Bvg2J/WABBqN+8T2IWBenr8UvkY6foecukAbQ0fbjhpJ1FfLOQcCVxtwlL+SIn0k9AGtgzxWmvAReHaxvXlAxYLA7PiBfWKyvENk7P4IbmhYkeBoYa5mVwG9/BILLlPU8pKS7pSSHHVfAdgaxeM0Fj2l8N0llKL5Q5RVoh2uoFF+23+iQacLWIGIEQ5Ny7NPKpt+ICBJW5SiqwJyt4dRxj6+w0xRfi89Jod7Ut6TsjYHcurtxATz9IDtrSOletqiEE7nrO01rA4/GxCKE8YGYMc/9cuEGK1FMEtj7nKUeGLjelx86Ju3q/FwX4tY6vmWs0pMQ+3uxL4mtzMqXx0hZVFsgJ6YYx2EJQDpdKlLYmjNF1tTTluszoJD2BmItxSXKrvv7Wa6dNU1npK1MCizeni8gFWR2CcxrWZ1FysZZQWgd4JJNh9ur2om0imGIXch7poAahrKbeAM5tKT3uilLbYmX3ms3oqDB0w1MvzW8cOiF+UM5Oa0hazYvkyt/FLj3Kus5ioW4Gh4zZn/HWmrVjLaTsPT1rJUr5a4vykaAFbxntlfffn7kWreU1FqHIkGBCb6PlgwMze+YsDXhcrtbuUWdLWipoWZ3ch9TN/DLA1AcO1SYC14cunCjlXPF9LKZrORP8pkf59AYCR4nroJDayWuOmnMwx388roOzZsV4MXf0wYuvvR93vP6Da+7SLQUtaY/xm31xilnX7aV7xk6KM2ZIPxAk0OVpHswmhelwvztmobQH4bVwvC935QbECbWFLcfMp39aVwxVLrEDT1I726JmeRb/xrgJbMHLbArgciMJzameulnU4lrjJeK1ln6S1yWoqv2hTuqh8gN6DRn+ViWsAJ4xqSPOVSUhgeb7GK+2R7D6TaoHrwjPHSFfzcOFlhMQXdhPDw/odYkIF0jdQ3UP6/sWBBTQPdpXA63psK9G8J4Gjy/Fk6sX2f72vUk9TbS5
sdAZgfEg9drxdNTxsSXniVcRmxojdFzABdHWiObGhr42XRFy9NHM4K07DblZassMJ+Qrfqjm6/MZegEvv9lVLlSkJ3IasZKx9T4Irk2QJHGqWLjuyDWCcL9yuVVytGbByxcos2qROwhvoHUXzZJ0TYEvmnKKaS/UNixZXQ1kiLVOpZ0U6R4MGtmAdzZdtAVwaVN9KRyp9EqjwUwcHqlHSN0ITaEOzZiMLwA19oanCecPI6soSi8RtTXEqiY3Awq7m76JrO8EzGOUgwbYlP5YORNWvgFiDF7hoXyop7nyOP4D9+9Ec2gLworle/Ek7ah2f2+gY7DtaHnCi9h++T3+iMd1MQ5Yk3sj9+xaFs30Opap7oqVq0a6x2jlXSJrADJpR29kPsKiUdcQp5gtX0U377EQTJZ/5yBy9/XUiAb0jaF5tfQsAcJ1YpNsIAFjdi2bY+n/JKACg61Anrs1dAwA=" + nodejsBase64Image := "https://nodejs.org/static/images/logos/nodejs-new-pantone-black.svg" - quarkusBase64Image := "iVBORw0KGgoAAAANSUhEUgAAAGAAAABdCAYAAABafGNLAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAABgoAMABAAAAAEAAABdAAAAAHJ9pkIAAALiaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+OTY8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjkzPC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CuvJJYoAAAu0SURBVHgB7V1dbBxXFb6z9tqOMV0nWSdU0HoDDwjU0LXUByqQvJbASSokbwVqkQrKuokEaQLd8KP2AeT1W9ogZUOTIPUHr6MCbZWSjQSt4wLeIKQ+UClOmweE1GZTqUK
uN6kXJ3Hinx2+MzvXbMYzu3d2fnbG2Ssd3517zz333vPNnHPn/owZa4aGakAyW/vO52ZjpQCLSUzqlpkcNVt+PfCj79Po+1ygxHITP+rJWemTEACK0iU5wSRpt5XK1mnZIpPlbKBlJTWx7+682T5WBWDnb/4TKZVa0xA6ZFbwHckvy+OLS8Fk7uDGOdH+GwIweKIQB7IZCAqJCmvyMSYzdhl/42/t3zItoo+AHtM3j3+cgPJPI6+pfD0FVUnDHd0LH5FTdFiFj2eteQKoIASMcYZmXL8G4Kj7aj0JtwEA5UcJPVTZvPPr13tlyeLiYmukmk+4zQRB+eRwm8qvVKG136G2tuVMNRGrAKg2q78aczOvLg0M0TDeqOQqAJIkJY2YmunWNFCidyiDoPgA1fafN+DRT5bYBYy5svqZ6zxVYhiis/vN9BK+YKOeL2glIQFZism3ueOqoot4BY9bfQWvWoP3M1PqaFHYZwbbluLoVkbbNcUEQfkxbYbRNYZWsTtc+YpqMLzMYGomYaQnbXqABaLaNLrmPqBbL3NNGl61a41r15RZxwmTT4Sz6N45kS4aTVxyAMRGP4EAVdgMFRqQLPpBDkCFSOOfgRV5zjj3zswBANNWem4KACsVGZWV2UOYW/9WzCjfrnSaXBw89nHGLnl2yVFGQXYJq1MOhnOlKQBxBnFSYhP5OuXoFlOH2GlMLvbDaQrZa11BDiV6AQDetSGMCYYAxChjbWmJZS2Zu9iRT7rbgktpxry9iNRwE8S1XxGPMLaYBxCJijRTP3ccm01iDibvhxU8Lz0BlUoO4WKsDEIgJbE/5SozjX7TnMtKgGVkzMkb8Xgt3asAcD31l/3DrnGsMgGIiTzPqIzVpdNMibF+qTLDB7+9DoCqQrLjUhxPBGz6//0D2fn2tpVUqSQ/6QNd6zbRiz5At6FIJLM0Av+AYeuuOM3FkJ3HG6ZvlU8d9RMA1F7GYtt75Q/HX3z+u5sOfSHcSqD4OvjEBEHHka1wyz8psNh9Ydj5zREknXikm719aWnmV3/779Zrt2RfAuH9J6D7U4wdGi6yfz9/g5Sv1fKD24JbXx3evLDnwa6iNs8P194GYO+OBfbBb4vsqe+EWLC100ihrQG24ZG+jtDrezYXv/LZoBGbJ9O9aYJi2xl7bt8Mu68XdodtENVcV7sUOjwUYh8UlgupN+fDM/MrokUbxuetJ4DMTfaXM2zqEFOVX5diPh9uDZ/8/kZ2cKCrAFDqkuFWIe8AQHZ+5vcLbOirdNfbEnZ+qSP8SmLTzV1f7liwRaADQhpvgr79tRvshR8vsY1djgwpgy1SRzLWxR57oLNw4u/XuycdUKIVkY1/AtI/eAHKt9IHobI9XYHgvq93HhZidpGpYQDQxNk7Hy5l2ec2Z9HfCGjcwX6PUh1b7mr5SD58KuvGApBoX1wHgCbOBo/P5nDKZuruu6QoGjoFwhwPS4L6QOdAdgWStQ2UUWmKtbehTloA2oVZ050RpDc0uAYATZztOF5I48DHJfS4X9Pr3bjOg+LYoRdDPAy6DKo3UNkBUByUAFGdQ6CKQBN8AcwrPZSSWby7IsPVn64AwBdIakyckRMekWU5j3gOhDuVjYLMvOES70GAGEFMlAeNgIyCUmd5gq/+BSAj4SLpjgJAdn7wxOw0Nn4dQWOosyKhF0ynQVmVCIhxUK1wFAwR0DRAnEY8BjJTJy0A5UBUn2vBEQBUO58lO89M7qGs6Hk/fp8HJVUaQHwBpA3nkNAHSqs0hfh+UD1BqbPsH9wxS7YCQHYeDhYLJIqdH6pHAzplnkRaHhSFaaG7cxhEpuYy6GFQXKVpxLtBNgTyD8q6dMoGYVVF2AYALZAE25ZJCSNVa6wvM4RiR1T/kMfviGrnSRqvk3jsDCRvBCYp7+Sw1RYAyNbT6RrJ+cXwXihlChQFGEnEp0GU5mSA/BLeHZwZstoCAO2WprNQ2Pw07qQmNLK7NddOXY5iHTpitCHAaqW2AEC
NoMMHkwe2JDDUJId4zmrDPFD+DO78bRJ7I2V1k1i1vtgGAK+Etq9P7u+JYVPUw7K1lyku0u34Al7QBqD4uFN3fWWHbAeAC6e982/t74ngGo+wqZcpLsLtuIitLweh+KjoRjA7GugYALxxeBpSgcBy1GX/wKsXjY+W7fyf06IF7OJzHABqKH1FhPwDzpYN4NJL/gFtUex80kk7Xw0sVwDgDaDREvkHOOrhBvuHy6qdx/B5Is/b14jYVQB4B+mA29Jia7T75ckrPM2NeLnEFt5r3XQRdh7DSrENv063qyEAUKdo2Np54Ng82/Y4Li4WnO4obeB6dOzKhp/JX3zH6brMyG/8mnB+BjP3T4Wx5ZCxkz8tsHt61my+MtMhLW/+6srMs3+Z3/p+Ydm2xX5tHVauGw8Ab33uPcbuTYQZbcY6tk9i7cEOnlVPjK2KxSO568F/vH/Tk4rnfWqYCeINWBO/eHYD+8xjHeyls3WZJdj5G6+dv1nc/fLVEJRvuJtuTb0NSvAeAKSIueuM7f11WPEPFy4JA0F2/vHffdL50tvXQn7ZrOsdE6R3B5J/iB4o+4c//qJotHdo9lqp8Oxf58PvfrTkaXOj10VvPgHalpJ/2PRoiD1zqsgWlxd49tKKfDOdu7bwvZNXSfk82Vext58ArSqfHguxw6cussEHrj6z54f/+mf+1uz8LfkeLZufrv3xBJQ1iskyNixdeXW79Iefv/v0Nz79yut7w/cibRREeb4MfgFAnSx7I6PVMk32NWAxSNuMuq+9DoDQZBlfDPLgZF9NYLwKQF2TZR6a7KupeM7gNQBgy5VFEUuTZXyyD530vH/wEAC0oE+L37UXRWiro/Jta34b6cSKWeKLQYyd0WHxRJIXAICdZ30SezMhuihSkrBbTjb+FGSlZpXFoP09ccU/0JcePRYa/B5QgtLNLYiUv1ao7AXqpS2Qot/sVz80GKUyXsKgoU+AWeWXFSeluAKxBTLBf4vGooCJyrPK11AAzDa+4u7nRWl3nK+DrwDAflCtwkMEip8R8A0AtP9Ub6s79qQ2AXDjDsRZg5RBPf1ec6wG7dRN9sUToNz9a8+VrXaotNKSWr3w2Q9fAFDl7i+rW5LidDjEZ7pXmut5AFTz0l9DuSH16+Q12LyX7XkARM2LzgjJe9rWaZGnAVDuftH/3ofDgKqv0Ommc0krATliRXoZAME5EtjimJXKzJYVvfu53Gr/KoTz2B3j490xKzLLAMjKwWgROUm3nJ1Sj+jdz1sOfrfaR1WqT2icV181Lh9AX8OiAICXmek1OfoJobb25ZwbncQnKbVvvfot0qS2B5cTmiRHLkkHJbk1C+FCpzONdKwAgJPsOeFWwtbScVSaAgBFhcuZYFQBrgsAZaraRF1mWanP/NMLem/mRvJgHnN6eRJPxAHrOfwWQpOX8WyM82nqvxcxbCKZD/VAuSGPbRnwsZNP9OjerGUfUK4pbVuFjRYksFijTku7slKGM82Gul0FAFs7iAlrsusiDKkvcNU7I0mZ6gzWc2WcFKU1aiNJqwDQGiqOliaMGP2WLrJYQ2aKFORk31pKyveKDKtYBYA4FLvp7ml3w4bZkJEUkQEnmBHhq5NnVF0KNSx+GwDERacZPX6k1LAzmgyhxRrV9GqK2nCJG5l27dWStAYAKrBeQBBZrFFMr81PPeo9quiwlvaRrwsABwGv2Qfx28+OWWixBv9LM0N9tiEU6RMNZ/eHhcwf1WcIAGWePdCTrtj46ksgROaTyE5bdMakm1HSVa33D9JrZai5L0h5RPHlQXo7pTl3ZfIpgC9X1f8pssr6nf+NxRqxSuQUtkWOifGCiyYwS/g2Hf69o1mlC9fRZGxqYN1r4H/Cy7I+ycHqyQAAAABJRU5ErkJggg==" + quarkusBase64Image := 
"https://design.jboss.org/quarkus/logo/final/SVG/quarkus_icon_rgb_default.svg" - springBootBase64Image := "iVBORw0KGgoAAAANSUhEUgAAAGkAAABhCAYAAADV0Y9XAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAABpoAMABAAAAAEAAABhAAAAAHcadxgAAALjaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+MTA1PC9leGlmOlBpeGVsWERpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6Q29sb3JTcGFjZT4xPC9leGlmOkNvbG9yU3BhY2U+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj45NzwvZXhpZjpQaXhlbFlEaW1lbnNpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgpGdeVGAAAWNklEQVR4Ae09CXgURdZdPWeSyUFOArlDIOEIIEcg5JiAikICoiK6rLi6q7KYEPlxPX50yXqsrLIeJMiCn7uuru7+6qqQqCsq5OIMNwEChJCLnARyTI45uvt/1cnEyWSmp3uOZBK2vm9mqqtevfeqXlfVq/eqahDhxGHtl/H+Uol4NkOgCQgRYTRDhCGCCAOWveHj0ce6F/xCMtENn5sQayUY+BBEEyKYMhqR50gCXZBKJGVbF+/thPQRF3DlnCas262cIEL0IgKheGBqPnyi7MgcwxDEaajwPhDeT7pusvC9B/JVdsTvMFTDLqT1uUlRDEIr4e2/H2o502E1HYxYC0nFBGL+QZDqL7KXHGkfDOIcKcMipIxv75YRdOdqgkFPQTPc5gRN0YUY4kuKIT7avqzgRxgyodM5TxhSIWV8m+hH0OgxhkEZQHi88zTDAE5KCYJ5s6E74JPPH/icGpAzTA9DIqSMb+M8ECXfBK/neqinfJjqKpRsGfSuP3q7o39mpeTrhBbWw2/4bL7L2w8cwkqN1YG0uiSPgllZBJmRm7SGoOQXQUDPQpGRIiBcu2gGER+1qJjjT+UmYyXGqkC5SF9Y93VSsFWF+
wo5rCdlfpMSR9P0LqATawuDTlKWhmlqlxbR/7sjtfgmX56eyJ3lKiMUVbBC+EN2Wn4O33LGcHbvSSs/WylKz0t+CQRUDMRGg4Bwm5GwVlsrZkQX1u9JWW7ciOae5YRiLeT5why3whwMn3S79qSnvlkYiijqI1h4JvEhPkJhsOaXrQ5UPbNr9nGsxpsM6z5TKkQuzBXI9IcPJSXQ2D+n5V83CWwh0W496ak9ylSSps6McgHh5sQv9npZvWLfhtyFZjVUkZx5AeD8ZSKXLvgVaRgiFX6tCnYRUnquci2JmK+BA72pxipmRlihBB1BnTClVEB7RIMonxkj969fO/eNErZeyPohzzYhwUANFoPXYVLdAYyIRlgj24Ndf2jAHzLylLf3I4M26WsP6W/mvnolwnvKFBKReL11xzPf3+nWDycgYrWQsIIAzH0MPD0vgN5oBHUjGCZvfW4yqxxkfKPMhEoqJ/nedibYY2I8QqRvtG/cOUhzUWvUC61pAOuEBJIJdGl6H7SW1dYQHYVlZKBNfLY+V5kFAnsdz0NPzN2Cew3bvimRK1m1HXrYEmvqbpWQoAe9CUw9ag3BUVxGzBDMZqiffEPC9hMSUhqpr2uUz3Q2Du/2UrAKCtaoBQuJfVsIZqOegf/+DmyBlVMzC8a5RyQYppJIFBTqNfkSpAWnf5s01TCPT1yQkDL2KH/V97bwwX3LwUz2m38mMWwF9oUNCsrwe+vYRJpcOijTQgJvIaV/kzQNfC/bLeC7ZbNDx0y++OTc14KgASSmGiHaLw57k7EXRPC8xEtIz+5e4I5o9AXQcDXFwK2eFu495cLGBdv9QJNjBWGqPdyk7jEyiUIFefOxV8AUjLk0XkLqQeL3AMFEc0hu5fRw76llG+JzAqCPmBVQX/tIZgQkXIC4mNa5CDKbWRQSuBpWg8n+l7eyIMzVfea4lBNPx2cH8hAQiyIueAk2EYFeDvs4BAQxFyzbLSn0JhfMrZqXOunxojujVmM/E2cbGrZP6JjoXr8SQgsN0y3FuXsSJd8CCOBN+W/Qt4BEJOt+PumDAyCgREjjLSBcHtZOET6ugVjLm5a5exEMkfyCWSFl5CbOBRRP8kNza0BF+84u3bI4t26cR+QCa2s8N+jOCiiLGKRN5ovDrJDATPgWIDGfz5fCKICTiuWqdfO2/gifGENLgjVVm+q/AIw1YFBDSMm3vMnuuj4vOQV2Elr9tvAlPhLg5gUvOfpg7MbxYDW43R78BihC2GEOJIXnM17BpJBAQC/yKj2KgaL95p59ZOYmyk3qiYd9uwXolZEysbxTreuZhr23fHbRDhJSnxNrod24GmGIJvrOKl0z80W1h2zMLAexLorwnn7lQtORWNKFmA008i3RGSQkmISetVRotOVLxFJVYug9RxZPeMTXReI23dH1mxYw/yYICZOZB598HOEKA4SETzEAsGADIBcBZ80TkxJ17NjEUmXEys5Qr+iZiECCFpi21CvK5zbWvocQjYVkMQwQkkQq/QU4rUwaCC1icnIAESnWgqf06tSABY2zgxZJvV3GYpeBo4Y0ztbwdQ3EhlhoaoSHO4thgJCg1BqLJZwUwFWiaHOVenYoJJ5d3q5jOwLdw7sCFMGUvyJE7OMS6CsTu0QA69j+iD/DGkSkJNhV6tHapWkf/9u8hDGWNlz2C4l1RdBDevSEs6FkYldVkEdUTaB7WBs0uBoam1BIPUUuEoVUJnKVS8VSuQiJ3cDyrIChyhOQ6T+ceJ0kE4V4RteUNR/1kjLiKcBTMRdf/UIiaZTKrrK4oB2YN8YloD45/L7LMX6zRb5uQYGwaAwDcjEOJDmsqMPHxLSBkAiaofkLCTyuMHEKdr/bVFEPuff1pVGPnZs5PsVHLnbDzN4ydsKQMTE6tvEQmmypEdme9Kv9SjmhYobMwhCoCK9cPePZmhCvmDnAIG8blqXKDHc+wzDNCLFDr9QSL2MVIX0nTBj8cnIGVkiKDjoebHUOP5YCE3jdu
rit1X6K8XgVH8bJ2QjMbFe3VHrKffGLZzEoJF593lmEFRrOwAoJdlgmwXDHCWhLJt7Bee+U9OLE0BWz4U3jtTawhd4wlaXqOiq6QEi8yEtEcp8+wHHsNi/Y/GCuIBgYQF8nLHc5cwgspePes+XO3LKksHuTQUBuluBHan6jqvrIWEUo79EIXlxfMNrieUm29qt4P656s0ICAIdoURN8Zpz//aJPRXIJqxRw8THS85j/O/uWF2ioQvbUiaDXNeOKS0Ti8VwNQD5xbBa2MERxAVmTFx+SdmT9/HfCYA3D2wNpDR1nKNOkqj3s7RLQCbwIGincJJ64DHjtEGuBYOMmvkjXBrcJkG5RGzFR1mzS9MDE4+CDwXcy3ApbwHr+UvJc8J0THsYnJwQFhdSjhy2AGO7hjiJIzq4miCoAh8F22sdmvYxNL3YVvFA+hgq+tKH4cLdG5eavCBJ8UYib1EON+WRobs0azoIyY+xVIYXM68bTC3I8YIhztxdOZ8ajpTWVfzv5StzS6EfPAJ8yoby6St21uAyJuE/lk6A52E1IT8/fdhG0lrFCmR2h8PQHJZvbKVongfkXTxnCA9Ort8G+Rk6tkISbr/AtVzaHxLDlh8EIyttvbzPBYUZQ1lxSdL75UKwy4v6joEpbN2Ugmq0FXOrBLSSwNAjal2yqbaQiac99UzLDTOWNxrQurap0Z8kL8XiRvnTSr7Gj1KqArw3DAaYcTqMpDIeMYK2kF/XP30sm/QbepltjmGMY+sabhY9742FucdTDh8Bab91QB81HExQrHMSQbT+35uAYHhRZDWNwFr8U7PFUht8XyQ96xENp3j+2qaalu34c3sm6OOqRCFtq1Nbdwg5zDGIsCYmxSUhxQXefsHpMtqWGw1D2u0t/O1raeIjdqPLozJeOwOgxzhY2OtTX2cUvDHb4pkuzgSQY0iYhLYxY2Tv7mSUxOjJO1efnf3fp7wm4NuM9Iiumjk0weaJPSG3b1TdZfQAxFnqSJQAuoviUNWh0sVwwoyHv4vUTBX89nqXEdUFIRKfPe6sDorYu1qkOTSu7/GEoEbfiAKp6JSZuTZjsH1cG5QTZq6yhM5xlKlsvFL53eGOSnodVU58ugl2tNu/No2jtNax8YLwaBpXr8Zv6JSkdU2kqg09ajH8cfqNGbbjccrLg7eJ1iXoVGTaPXI4PTZtnjwqrNG2NLB5QF3et2HeFCyfZrAmoAwANF5C5vEifWFu7vDnUw55+uqEoP/vQhmS9gGBLWGfmgm0iYEyw+cdUZRpU1V1sOiKwT4kzkH33jFZzQpnJ9JT72M2kZIbEcCT37CnbeeCDYy8pDYn/LmHnGXwIzDDNlnj5jVNY4LD1B7E+JS5crPscAE7BR/CiTIykNlsruJgb6jzoNY3bD/3P9UstJxcY0n5w2sYCUJCSDdNsjZ+8ti8c44ALUvC8zhlInAuqxSFOKDOZsE4YNT1Jo+u59PJPv6BAQFMMqxsXdFcJzEOs6m2Ybksc9trVNXXWsvY+aPtiS7hYIYFdyCohAXJ9T7REx6nz6zsqDmz6YUUQtiQYMjrRZ+a51TOex0JjhybDPFviDR1Xr+rLk6QG3xPIGdhGJkWuJwiqqwcgOa2xxphgeOiEMdUuVnRj3EPxDPy35ZbtKv2x/J8DhjdMG2ty6fPfCoSo3b3L+RVf9I5AiOh5Z+lBPNVwBrYnZS/5DqwODHtghhPaKBO0x14fvVH6SHi80d149KUf7teYEhDevLkxYQe8fBYv0BBcVXgxWo/V/TiJLcigc3wQsELqBURf8ilgCNOtVVnUTAzhnSEOjdSx9/InRVk/rZoLmxkH7S0I8phY/lzyB3LYfjYozx78V908d0FHa/s0O+rffHD2C0lMiHABQXa4+o6qNj5EnASGqm69WLT5x5XdeRffTzTF07SABad+l7TT35Ful9yy91mtDkYupl0h2mGKD+O0fiG9nbbvGtwuIEiBgBW5XSdUY+bs9dzWc71ka9HjV
7cWP5nY2nPdpJMOe5Yfn/NaDMyxDltWqDStpy63nB7bWy906cOU/FY+dRygnYGn8HNQCQdNouYQHazOi7p74iO49/UL2xzscKRr6J6yT05u6TpZnz+Hi/7yyU8WLop4CNfboS/dJ6de79+gQyP0Fy6eDPMGCEki0X6s00r/CAC8NJq2nuYAcCWfgVN2TmUJ79S0nf7q/Huaktq98C9m5l3T2GEJF2gcivKe0W9ANWwce8Y7tR2nzzUd6TXMIqTzO8b/bxEG9IC37zp0Axj7uxDmiiu/dop5CY6ddFa3Xih692BG2Qt7l08/Wvv9HC4Bebn4N752+1dlQyEgaE8NWNJ/HkYZ4vusLMs2O70cBvQknEjS5Ls0Sa+FKIx8lsN/Ln8Ud3vkQ3UkKRqwELRc0i4QalClTx+o2qMtrPxqulrXZVIhMKaUELr8MNyVOhGOck4zznPE88n6/Qdr2i4p+3AzlFT6WyF0TAoC7rjLAxkt5YsIdswUwaYMXg3EF6c5OOgdbc2qa+eP1H5HH6jcPa1Lp/r5DTVXqC8dHybOmPfn0vEeUXY183CRxeam5/amhut9R9DgJdvSCvD5LN5hUE/CJWkCvQbj4BKImhSiMfZvL/41YV7w3SV8D1AZl7fwTENFL5e3nGoort7tdb7p6BSaoeZbKDMoe2HkqoPLo9dGwfpnyAQEi/2WN4qekOsFxG7hYjRPDmLOQoJZIWTkJX8KJtqHLJTvz8bnX19e9IXG1s0ZgJDRUOor1W0X647V/iA7XV84sVPbZrUhN9J7WtmvZ7/So5B6zehndmgiXTmHN1Zcun68/zgM/D9C4bbUAsHWdJM9CdeBZKjnaUJ0D0Rd+NSpveeG76v5D9duSv6wBt9TwKdMHwwDe6qvXmsrrz1x7SfJiYb9UYBrAuThj9UB7qSrePS239cHKEJxrxugIFmNlH9Bzb/Obj0LAorTF4HeQMulOt4vvb4c/jXbk3AmzE2vAMiLOM43YK3pmYSdteYuUII5pb1D3Vp+9WZp+9mGYrdzjYcn2NJTjPnCFzitit2o8nMdh8f9oRYOkGRuvl/yYtXZxgPGPffj7LSCNcb88nnmFBK+6ot0YfAf6EbwQWYIc8eE1QdAoYikGJ2qQVVZf77xCHG2oTCwpr0igmEouzaeq1jRnhK56rQy/H4/cHNHG/IxlHGK0Va9UfAEU6+6GjaALvyDNAjIB7oEXvgLDpxCwtgy9yTPgT+0OwBRdmeLYAoOKoB3kM4at7A0JWKlJtA9YiaQ4bUAdxA7NKzRDmQf3jgTlgEKYxq0jrx9+4r9Pxmn8322KCSMCP8/BQxTm/kidRScXKzomBdyV2lCyDIC3Nl49T6cgmGriU1Pu468MMijq28DUBa+AGVhpf7Zml9eQsrarxTDX3kWAgGzqq+HzKcZbpqv7dZ0SMtvnJkEarJZpUQIo7DZ5Rqc/ymLD0lVgIqPx3mZkPKOglXrui/ChhVVceXu28xbNpi67NTCYGuHOT3vvISEgZ/+ShlGiVnH4AArMj7mnj7/7YMTvGPxtltWMODDb/j09J9qsGlGT0jIb6BH+NU7IldXTQtICICrMPEcw5tPIXSEwuITFVWtZef+c+lDr/PNRy1ZKxoJEZqdvSS/VigdY3hBle+9hprcD0j6h5l1cW8WRPvNMaX7a989mFl+5cbpGGOipp6DvSZeAfNSzZSA+PFSUmb30/CmaPJIo2A4u1x1s6zxYNUexcn6guk8R4gOHUOm7Vi2v4AHDYsggoSEsaXnJS2DS62/hKgI3zG3ZXEe3hdhcghq7rp26JV97C30uOigEOIZVX5H1Jpa2K4cbOtVzoOQC0uAA49Ms1rb2dTcWdda03aRBl+Z4kLT0SghZqc+kj1g7F2as6xwnzAWzEMLnjdyUgv3wJ8tZsJ+sZxA90i8+9VsT/GW+weZIg132FUmhN5TNdY9lGhW1SI4UtJQ316p6tGpBuyI1VI9km6dWgZvEuMu673zAJQHj
VQko+B6TtpN4qHF+EHtpiUil3711lXqRovJge+NllIT3dpOUkurUZe2TaxSt0lhw7y8pavOu62nxRd6CL5vwtY7J3Rw1mhVTpr9BITrJ1hIuNC2tMLt6bnJbuCPeRg/mwsUQeMdSINCfUdl2Oel74QNyuBIaOrkyHSOLBouZH80J7Vgj73ZsXpRmZNW8EbNjbLtMEw0mmPqcvOJa+byRlm6DrYePJ6Tmv8PR9RL8JxkzERrV+N9ni7+n0L6gKEKjnZUbNq7wteKMd2YhLM/q8C6/UDOsoLvHMWozULCjDW1X0mQiN1fdZN5BpEEIitullZ/cHzzdLgI1stRjDsJ3jq4nzM1e3nhSUfyYxchYQbx/4DD30z/C6JD5q9xZMPwwH2cJKh7300rruYBaxOI1XOSMVW8JcxHgVJgS9QfIK9f0zKGGwXPDNRhm0/39fihEBBuL7v1JMPGf2qPMhX+sH4npA3HvgdDVuwdr4GD4A9n22mRypc5u/UkQ4Lbl+XnEaKeGNik+Q6k6wzzRmgc12GHSIdih1pAuL0c0pMMBZG5OzGWJskdkIZteyMx7CNpesO7y4vwTVzDEhwuJLZWYJ5Iz1Muh+sINsFrwev/GYalNQYSPQPWg5ewhWVg8tA/DY2QDOqVmatMgAXwczD7phokO08UMQdg0+ufslML8uCFwkrCsIchF5K+xuvzUmaB6X8NPD8InwHuDz3MEP42Yecco6O3Z99TdH4I6fIiNWxC0nOHHYrNHcRdCDGrgZk7IF1/X7YexFG/zXC9yZcMTX/W2BNQ0HcK31G0bMI77EIy5D4riyBb5yhjdRSTAm92CuRhTzC/29ANEZmO18LYVQx3xx1kKKa4QR1wxpkFY1gFpxKSIWP6+MZcpS/ov9HQwNE0oieBLysAjJkeMG94AowHpHtBJVQQx26LNhAuBba0BphPrsKUUgF3yV3RkkT5e0vzG/Q4R9rv/wN1zenro4tofQAAAABJRU5ErkJggg==" + springBootBase64Image := "https://spring.io/images/projects/spring-edf462fec682b9d48cf628eaf9e19521.svg" - pythonBase64Image := 
"iVBORw0KGgoAAAANSUhEUgAAAEIAAABCCAYAAADjVADoAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAaGVYSWZNTQAqAAAACAAEAQYAAwAAAAEAAgAAARIAAwAAAAEAAQAAASgAAwAAAAEAAgAAh2kABAAAAAEAAAA+AAAAAAADoAEAAwAAAAEAAQAAoAIABAAAAAEAAABCoAMABAAAAAEAAABCAAAAAGpSBCwAAALiaVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA2LjAuMCI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIgogICAgICAgICAgICB4bWxuczpleGlmPSJodHRwOi8vbnMuYWRvYmUuY29tL2V4aWYvMS4wLyI+CiAgICAgICAgIDx0aWZmOkNvbXByZXNzaW9uPjE8L3RpZmY6Q29tcHJlc3Npb24+CiAgICAgICAgIDx0aWZmOlJlc29sdXRpb25Vbml0PjI8L3RpZmY6UmVzb2x1dGlvblVuaXQ+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+NjY8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjY2PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+CiQPAV8AABBVSURBVHgB7Vt9dBXFFZ/dffkGEmISIYSEYAhKjKlgRTBaUFoFi1IFxY/SD49Qi/X0HE/VeqSntp5WPVVr29NWa1EUPVLrQUSRYkQPICCoYCBBvgnhKyEkIeS95L23b7f3zuzdN293X/KQpad/MMlm5s7cuXN/v7mzO/sRxs6lcwzIDCiycFbKhVUDBg9Uy4vHXT8sp6BkhBkJDzMVs0AxzUGmyTJMxjTTNCLMNENwdBpMaVVVrbn58/ebwu1tR7sONew5K345jJ4lIoqzL5n9g2l5wy+cmTd8dI1h6OUMQQNyAA0HwMeygbJ1MKgHmdfbdawz0t25I9iy/7Wt773yCmvbecrhv2+i70TU3P5QbcnY655LyxowNhaNMD3SC85KoLGMRBgA3CpzMpAEkpEg+AFmoE5halomi4S6t7bWf/TTPSuf3+AbesmQJpXPuFhzy8OXVky5c1UsGi5HAnrDERbRYyyqQ8AD5SrSjiTgjHPQWBQylHiZk8JJQBnbDYaEKqoyJG/ExTPT84eub9+xoemMnXUY8JMI5bK7f/N3IAGWAoKPsZqywWz62OGsrCCHNbcFWW9E54RwsBZIRCtkkVvo7Touc5KAkHBv5sDiUVd3HNv9euTE0ZADyxmJvhFxXknNsPJJMx/Xw7054WiMXX3R+eyxmTWspjSfja8oZGWFA9jar46yGCwJnqxIcJLglkXUYD1GDSy
V/PSM7BNtDes+EYb8+av6Y4YxPT02NGbECtBVXAK3XF7KAmrc/MTKIjZmWB4LR3VEZs+4wMfDw67jUZCgE2839DDLOX/kTTiMX76jHd+MqYqZAQDgqihOgppEAjmswYkCgcdnHVqSACYdq4Pdx4jFWCA7t4jlVwwgu37kvhHBneEoITrg5PjeF80J/m1v7mCNhzpYuoZkiBkWJMQvp0KOR0tcFuShjAeQobBYxLdljY4GErw9EyEadzYjoLIVWw6y7t4ImzCqiB3tCLJlnx1gwXCUBWDZ8NmWrhI4LMC361FCwLYeyUILLqjQ7nPyjwgEg85bP3ieqNt2mK36spnXp0EF8BPXsXQRD+9HsgcBoCCsWm0o+538IyIAc+1wFCMDThtwwJXCaiPQKMt1Thn15HYhUp115fGRDd+IUAxTV9IzQ6oRAwwCOAKBs6cDEMjWztEwDcOMRLJg0QegFtTFckhGChGj0CX4/5GI9gP1G+t+d9sYRVHFdOHOOhMOzBNSjyQpZuWN9/110LDR041omBNBYIkUr8gQRBmw+PxLpxURA4srC2DfX6pqSjZcGnAS7aQrsIuIwF6YajOgCcuYJ6QsS4KzK/xCdIAGKHpFjqsO9Hm0QR7I1CsqBhQOHZo3hkUN2JyIpEEQ6ooaDcTCTR9vPn6M6vvLUyKi+taHrs4fUXVvXunoCaYeGw4XPFz88ItH/PKXcDcp1zv17L4m3EeE4UrYSzxYNgUxzqhAJazjy619T+ilhd+bUXtj9SIWjLhxqsqhbduOrXhgQd1jH3zQfMStkFjTNxEVFRmTZ/36qbySyvm6HtWioW5vR3FG8W6SAIJs31JbZTpvQAtvQ12MBEEeBgRviUcGtuEP6lltcfschBLuhd0VkhDE0HIk0yypHlM4d/nim781Y/abN6386MhOh0aC2NeGKjB9/sKXcoeNuh9ugTW44eGOofO2Y8LV+C01yQQSZbqlhmEJGOZcIoBSP66DskWA1YvLol+C/94CLh8WY7CRYRkqG7309RmLJk0agWespCkpEZMefu0BNS3j9mhP0HLTIoCcJ0dtGfmRdLDeaoOCKFvtskx94nXedrCdH0mhYAPqAAH8PIJXLijDJi4zJ238b++vubuvrp5EDB5ZWzq4ZNSDOicB7bsBxh1HkMl1ZD1RRnOJxDhluQ8RxfO+kHDwFgkYDUSGFRlV1fn3VFS4T91k0pOIktqpt8M1Ph/xOZ1HOZmjTkAkx/tg1/jJtT878X7cE/IZ7sRhl4YJnveARfjFpYARIB9ABhGi62xwQXr1gnkTx2I3r+RFhDK47KIb8Ex+Oo4KgDCEgyinDXmG3URRpAg7drvDc0XR8OyowwV7kACPEUBRQLlMCpRjulpemjPFYcoWXUTcfNfcIXCbW2nEdMAgHHOC8wKD+GXQyfoIHVSN26Yy2U2wY7sqCrBhY+HuroM4WkFhThXQASUCDWUeGVhH9VgHR0+UXVyTXyusuP+6iBg+YfoFphErEhMrLwPhfN8AvcERQAEY7PBwlmxjjRiQtwk9ZNad1PQMdrxp7xvQooydOPwq1hOGIoEmQiC3CbHq+PLIqKytzh3sturxYCarsGwMeGWtPeEgOo4/fKasMq8RYSBqcIZRBxK1Cdndz36CLTS5XasXTG7cjtNhjIZYOFTfUvfHRS88ceX1EO5VfUcDEiKRFI0WRwJKqdMuyq6IMPTIaJoR4ZIAYpOAgK3Z485bMgdNZcyRFJJRgjLKvI7LWJR0JF2u5/AWSdCyBh5rXFv3/aoqpt1xZ82TrCcCO1wJaH9lZqbfe9tITyI8dpZmqU2Ey1Fv522AFhhbhtnFx9ZqIB2qMETh6RIcuEVOHANO/1KdgwMwocJ7jWBd4ydvz49sf2vX5sPzFmelsWoWBhLsqwWMRWU+CTgeHFTmbSYbNTKnzGkfZRcR4GAeAcE80WH3DLp1cHAVwAeYqSgHI8HO/d37t50ES3jdhLt1dBjdAz0kCn6xBy0
XLIqk4KuQcChi7u1pP76iY9PitXdOzR/04vJ7lmSmabfiRskLqKgDi3wczK0DB4IUCetFwn7iXxcR0JwGTicQgGQkAgYtR52QDf5WCnaja5rWr3z6yJqVdYwdOeP3D7OmDil8pO6W+d/4ZvEv4N6iDF6QoANwOIDyCLBI8IgGzrhpej70dREB+KJ9RwH6IN1g2YQYTMsYoB+rX/PozqVPPQla7PlnJ99wQdn4azQzrRioFZsgbOgr8YmLwXIALwwzfcK4oqLMgqwL4ZVZAesSrw9tApAMF3giRyKK9NBXEX8uDzyIMDtp9iknYryjQEQPXNaM5i2r5+1755mFLzw7acqc2Zc+k5GbVQ1vdFyDJq3gJzsHgB6Y/ZP4MEcGbc06RQS1JUSBpS8TBXrw4DDoNb6LCGC42V4K0JH/0KyjBGXezl0TZTWQxjqb9/4ZSVj68rRbZ9w19lXW0ZvOuvAan0JKcDaFGZUBp9wX7arwOsHwfDbhIkIJBHZysBZoHgUSIUgMJiIEc0UNHK7/cNFjd91YPhpIeJG19aSnAB+twC9GDM0e2raI4Dm1Y53UJpftvpKdpH1NtutAaI+Xby4igscON3LbOBgcMmDujF0nnMNoaD+48012cFvHLxfP+RPrjg70GshVJxOQ1HEHAQT6dPVxLMSjKCf/tmS/JxGuDdXhTe/shTlu4U+abdDepKBt3Ce07tz+7viK/EFjLhs2hfXqLsyJFWgLZw+dc2yGeJ2jXt4ZOvU5QIe+lw7ahWfKHSfCjZvqe/E+xZVcRLzzxsKj4e6OXfgmF6MhMQpkGZsgYuAxfkvDh01anlnKYuYQ1wgJFRYJsrNeQL82SbTBchCMhMEj4u1bO98Hd1DJlVxEgIbZdXDnKgV2g0iCc2l4yHCt02KqobmeVyeMxmfPw0GqTykakgC1+2J0WAcn2BoPvtdgSiyy8O3WtxN8kgQvItjR+rqluKsj0M6owHpBEg4KZdgj6BBAkl2piLoOAmzHHfUEwqlv1ycByvXBliuSQB/rVJMFu2JrXl7W0iA5llD0JKJ7z+aGSE9wCa5/Dhgw2qSIM6klY3MS/DgMzTbmX2sJWETJs5tAkkSMPBYRQwTmaOxfy1qfAyfQEc/kSQRq1r/19K8CmTl8T8FJ4BPOzwlIS/zHkwiMAhzTw1GbEAQpAyVdqnPICWQ6+1p9iAwaF31Ogwc5wdi7P16we4UnA1ZlUiI6d6xvOrxl9Wy49T2kammAyYoKpMBaGlBwRwTNQsKsyI4SQMqpDXJPklCPdKwyAZXr7b6WXQSYrcENqrF91tyG+SBBQ/Lk2kfIql/9+8n1xxo214688ruP5Awtn2bGjBJ7iQBgvJNUAxnwpQzQjonPGjriiAicGSKIyhwM1MP6ZRl4GwImoChsOMvYHxtxqaIuypij7CjTIxZVaalv6H73wSd2LPjPutBRUO4z9UkE9uzcsbrpix2r5+UUVg3R0o0RhmImvihRFIN17T2coRUV2mBdhKCzOCHWbHFQUM5U2OZPWx//ycMb3+vTS0cjTIYyIMtM1zT87ART4t4FLmNRRdEPrtnck/jZjlD2/NsvEdQreLwBX6ji4ZkMFT+GwBDGA2fPAk0RwImQCaGZNNv0iLEvlgk3AimkTMNUQiwU2bKFnUhBPWUVi9GU9ZMqXnVF8ZVr3p+1Dr4PsmbfCZoiQiIJCVONELyXg28C8DW/HDFWf04gnSOwL6yJHLZv8nc+v+bjrZ2dSR06zYaUIyI1u5bDCYD6iYyYmc2CerYgwUES2eG5RQwvq/l6LMXnG6k57n5Ul2I/bzWcPVwatBxcAGSgGCEyOCpbuQkPYfCzBx4RECwKXLmIGNNxUvD25rRqfYwIdBpJoPDuB6iLJEnfABIyKxnLvQ7sATGdsAUIwaZQSfHu/rQoEMo+EoE+SyTwmaRZpnqQiSiKhgQ9aDfgHJMxkrHSP8B/cuQKL3O/zdh+2Ar07j5rZKR0phbe9P1XiQEyTgS
eJygyrDIRRCTYehgFSBLqWTkSMejaOAk4rJYnooPbBTmgmtHskziIb8k3IuC+5Cjc4XXFo8IC5wRqkyQTgIQQGVjv9QWMhRu+3O1oix7/9FPW7RsLYMg3ItZsamkKdfV+jA9A7PC3Z9oJ1IskJADq8UYPHxtEW+M4oy3wABfqFFjJsG1uaOh6SwwSVznTkp/fM5vlRemNY2uL7mChKOw+pfNBfxssXBZ0zsC50dvgs5+NECRd8H3UFsZa/gLnh/1wAs1k8N3U1vse3f+zPYf5298zxW/3921DRRaXvnDF1BkzR7zKOnvPY/Bxuh0dNlCceSCJg7fKdO4gI5jj8jBp6wyRMBBI6DG23Tz3y5kr1p3aJav6UfYzIrg/S5Yf2rNv14nl064dMjwwUBvN4FaEwT+yCEKkKCFiKBqcaBRwDfcOmfDgKyc9uK0x+Pycnzf+aNWGU4ecqn7IvkeE7NQ/fn/xpMtrcn94SU3uJPigo4yTAV8Esh4iBiNGSnAixM/gRIKw0dTG+vpTK5568cDLry1rb5Q0fS+eVSLI23GVrGDOzGE15WXZV5yXrVw2cdygElg2+dAOn+Hix6uwU9TUUydOhNs31HfvZhrb+M83Wzd/tjvUeOgQw9dcZz39T4jwQmF2rh3MdC0brgQqM1T4TunUKaVosq+XRK9xz9WdYyA1Bs7m0pBty+VUPMPLCyW5THW+56frIDlA/TDHA0/1XmWqk3OnDRkolpMdeImhNrmM9mQbKJ92QgdTSQQEASc7cE/i1YZ9ZaLIljwuAaScgGLuPKxrr6ue9MiGbL/fcn9EOIERWMxTObA/9SEyMMdEY9NsOsETYMxTObA/9SFSME8p/ReuClWyHxZNZAAAAABJRU5ErkJggg==" + pythonBase64Image := "https://www.python.org/static/community_logos/python-logo-generic.svg" tests := []struct { name string @@ -25,21 +25,22 @@ func TestGetRegistrySamples(t *testing.T) { wantErr bool }{ { - name: "Fetch the sample placeholder", - registry: "sample-placeholder", + name: "Fetch the sample", + registry: DEVFILE_STAGING_REGISTRY_URL, wantSamples: []schema.Schema{ { Name: "nodejs-basic", - DisplayName: "Basic NodeJS", - Description: "A simple Hello world NodeJS application", + DisplayName: "Basic Node.js", + Description: "A simple Hello World Node.js application", Tags: []string{"NodeJS", "Express"}, Icon: nodejsBase64Image, Type: schema.SampleDevfileType, ProjectType: "nodejs", Language: "nodejs", + Provider: "Red Hat", Git: &schema.Git{ Remotes: map[string]string{ - "origin": "https://github.com/redhat-developer/devfile-sample.git", + "origin": "https://github.com/nodeshift-starters/devfile-sample.git", }, }, }, @@ -52,9 +53,10 @@ func TestGetRegistrySamples(t *testing.T) { Type: schema.SampleDevfileType, ProjectType: "quarkus", Language: "java", + Provider: "Red Hat", Git: 
&schema.Git{ Remotes: map[string]string{ - "origin": "https://github.com/elsony/devfile-sample-code-with-quarkus.git", + "origin": "https://github.com/devfile-samples/devfile-sample-code-with-quarkus.git", }, }, }, @@ -67,9 +69,10 @@ func TestGetRegistrySamples(t *testing.T) { Type: schema.SampleDevfileType, ProjectType: "springboot", Language: "java", + Provider: "Red Hat", Git: &schema.Git{ Remotes: map[string]string{ - "origin": "https://github.com/elsony/devfile-sample-java-springboot-basic.git", + "origin": "https://github.com/devfile-samples/devfile-sample-java-springboot-basic.git", }, }, }, @@ -82,9 +85,10 @@ func TestGetRegistrySamples(t *testing.T) { Type: schema.SampleDevfileType, ProjectType: "python", Language: "python", + Provider: "Red Hat", Git: &schema.Git{ Remotes: map[string]string{ - "origin": "https://github.com/elsony/devfile-sample-python-basic.git", + "origin": "https://github.com/devfile-samples/devfile-sample-python-basic.git", }, }, }, @@ -110,10 +114,10 @@ func TestGetRegistrySamples(t *testing.T) { t.Errorf("Got unexpected error: %s", err) return } - if !reflect.DeepEqual(registryIndex, tt.wantSamples) { t.Errorf("expected %+v does not match actual %+v", registryIndex, tt.wantSamples) } + } }) } diff --git a/pkg/server/devfile-handler.go b/pkg/server/devfile-handler.go index d99cb4582b0..31b358571a1 100644 --- a/pkg/server/devfile-handler.go +++ b/pkg/server/devfile-handler.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "path" + "strconv" buildv1 "github.com/openshift/api/build/v1" imagev1 "github.com/openshift/api/image/v1" @@ -40,7 +41,6 @@ func (s *Server) devfileSamplesHandler(w http.ResponseWriter, r *http.Request) { serverutils.SendResponse(w, http.StatusBadRequest, serverutils.ApiError{Err: errMsg}) return } - w.Header().Set("Content-Type", "application/json") w.Write(sampleIndex) } @@ -61,7 +61,7 @@ func (s *Server) devfileHandler(w http.ResponseWriter, r *http.Request) { // Get devfile content and parse it using a library call in 
the future devfileContentBytes := []byte(data.Devfile.DevfileContent) - devfileObj, err = devfile.ParseFromDataAndValidate(devfileContentBytes) + devfileObj, _, err = devfile.ParseDevfileAndValidate(parser.ParserArgs{Data: devfileContentBytes}) if err != nil { errMsg := fmt.Sprintf("Failed to parse devfile: %v", err) klog.Error(errMsg) @@ -70,20 +70,20 @@ func (s *Server) devfileHandler(w http.ResponseWriter, r *http.Request) { } filterOptions := common.DevfileOptions{ - Filter: map[string]interface{}{ - "tool": "console-import", + ComponentOptions: common.ComponentOptions{ + ComponentType: devfilev1.ImageComponentType, }, } - containerComponents, err := devfileObj.Data.GetDevfileContainerComponents(filterOptions) //For Dev Preview, if there is more than one component container err out + imageComponents, err := devfileObj.Data.GetComponents(filterOptions) //For Dev Preview, if there is more than one image component err out if err != nil { - errMsg := fmt.Sprintf("Failed to get the container component from devfile with attribute 'tool: console-import': %v", err) + errMsg := fmt.Sprintf("Failed to get the image component from devfile: %v", err) klog.Error(errMsg) serverutils.SendResponse(w, http.StatusBadRequest, serverutils.ApiError{Err: errMsg}) return } - if len(containerComponents) != 1 { - errMsg := "Console Devfile Import Dev Preview, supports only one component container with attribute 'tool: console-import'" + if len(imageComponents) != 1 { + errMsg := fmt.Sprintf("Console Devfile Import Dev Preview, supports only one image component, now has %v", len(imageComponents)) klog.Error(errMsg) serverutils.SendResponse(w, http.StatusBadRequest, serverutils.ApiError{Err: errMsg}) return @@ -97,15 +97,23 @@ func (s *Server) devfileHandler(w http.ResponseWriter, r *http.Request) { return } - service, err := getService(devfileObj, filterOptions) - if err != nil { - errMsg := fmt.Sprintf("Failed to get service for the devfile: %v", err) + dockerfileRelativePath := 
imageComponents[0].Image.Dockerfile.Uri + if dockerfileRelativePath == "" { + errMsg := fmt.Sprintf("Failed to get the Dockerfile location, dockerfile uri is not defined by image component %v", imageComponents[0].Name) klog.Error(errMsg) - serverutils.SendResponse(w, http.StatusInternalServerError, serverutils.ApiError{Err: errMsg}) + serverutils.SendResponse(w, http.StatusBadRequest, serverutils.ApiError{Err: errMsg}) + return + } + + dockerRelativeSrcContext := imageComponents[0].Image.Dockerfile.BuildContext + if dockerRelativeSrcContext == "" { + errMsg := fmt.Sprintf("Failed to get the dockefile context location, dockerfile buildcontext is not defined by image component %v", imageComponents[0].Name) + klog.Error(errMsg) + serverutils.SendResponse(w, http.StatusBadRequest, serverutils.ApiError{Err: errMsg}) return } - dockerfileRelativePath := devfileObj.Data.GetMetadata().Attributes.GetString("alpha.build-dockerfile", &err) + dockerImagePort := devfileObj.Data.GetMetadata().Attributes.GetString("alpha.dockerimage-port", &err) if err != nil { errMsg := fmt.Sprintf("Failed to get the Dockerfile location from devfile metadata attribute 'alpha.build-dockerfile': %v", err) klog.Error(errMsg) @@ -113,11 +121,11 @@ func (s *Server) devfileHandler(w http.ResponseWriter, r *http.Request) { return } - dockerRelativeSrcContext := devfileObj.Data.GetMetadata().Attributes.GetString("alpha.build-context", &err) + service, err := getService(devfileObj, filterOptions, dockerImagePort) if err != nil { - errMsg := fmt.Sprintf("Failed to get the Dockerfile location from devfile metadata attribute 'alpha.build-context': %v", err) + errMsg := fmt.Sprintf("Failed to get service for the devfile: %v", err) klog.Error(errMsg) - serverutils.SendResponse(w, http.StatusBadRequest, serverutils.ApiError{Err: errMsg}) + serverutils.SendResponse(w, http.StatusInternalServerError, serverutils.ApiError{Err: errMsg}) return } @@ -128,7 +136,7 @@ func (s *Server) devfileHandler(w 
http.ResponseWriter, r *http.Request) { BuildResource: getBuildResource(data, dockerfileRelativePath, dockerContextDir), DeployResource: deploymentResource, Service: service, - Route: getRoutes(data, containerComponents), + Route: getRouteForDockerImage(data, dockerImagePort), } w.Header().Set("Content-Type", "application/json") @@ -186,7 +194,7 @@ func getDeployResource(data devfileForm, devfileObj parser.DevfileObj, filterOpt return *deployment, nil } -func getService(devfileObj parser.DevfileObj, filterOptions common.DevfileOptions) (corev1.Service, error) { +func getService(devfileObj parser.DevfileObj, filterOptions common.DevfileOptions, imagePort string) (corev1.Service, error) { serviceParams := generator.ServiceParams{ TypeMeta: generator.GetTypeMeta("Service", "v1"), @@ -197,41 +205,32 @@ func getService(devfileObj parser.DevfileObj, filterOptions common.DevfileOption return corev1.Service{}, err } + portNumber, err := strconv.Atoi(imagePort) + if err != nil { + return corev1.Service{}, err + } + + svcPort := corev1.ServicePort{ + Name: fmt.Sprintf("http-%v", imagePort), + Port: int32(portNumber), + TargetPort: intstr.FromString(imagePort), + } + service.Spec.Ports = append(service.Spec.Ports, svcPort) + return *service, nil } -func getRoutes(data devfileForm, containerComponents []devfilev1.Component) routev1.Route { - - var routes []routev1.Route - - for _, comp := range containerComponents { - for _, endpoint := range comp.Container.Endpoints { - if endpoint.Exposure == devfilev1.NoneEndpointExposure || endpoint.Exposure == devfilev1.InternalEndpointExposure { - continue - } - secure := false - if endpoint.Secure || endpoint.Protocol == "https" || endpoint.Protocol == "wss" { - secure = true - } - path := "/" - if endpoint.Path != "" { - path = endpoint.Path - } - - routeParams := generator.RouteParams{ - TypeMeta: generator.GetTypeMeta("Route", "route.openshift.io/v1"), - RouteSpecParams: generator.RouteSpecParams{ - ServiceName: data.Name, - 
PortNumber: intstr.FromInt(endpoint.TargetPort), - Path: path, - Secure: secure, - }, - } - - route := generator.GetRoute(routeParams) - routes = append(routes, *route) - } - } - - return routes[0] +func getRouteForDockerImage(data devfileForm, imagePort string) routev1.Route { + + routeParams := generator.RouteParams{ + TypeMeta: generator.GetTypeMeta("Route", "route.openshift.io/v1"), + RouteSpecParams: generator.RouteSpecParams{ + ServiceName: data.Name, + PortNumber: intstr.FromString(imagePort), + Path: "/", + Secure: false, + }, + } + + return *generator.GetRoute(routeParams) } diff --git a/vendor/github.com/devfile/api/v2/LICENSE b/vendor/github.com/devfile/api/v2/LICENSE index d3087e4c540..261eeb9e9f8 100644 --- a/vendor/github.com/devfile/api/v2/LICENSE +++ b/vendor/github.com/devfile/api/v2/LICENSE @@ -1,277 +1,201 @@ -Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. - Contributions do not include changes or additions to the Program that - are not Modified Works. - -"Contributor" means any person or entity that Distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which -are necessarily infringed by the use or sale of its Contribution alone -or when combined with the Program. 
- -"Program" means the Contributions Distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement -or any Secondary License (as applicable), including Contributors. - -"Derivative Works" shall mean any work, whether in Source Code or other -form, that is based on (or derived from) the Program and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. - -"Modified Works" shall mean any work in Source Code or other form that -results from an addition to, deletion from, or modification of the -contents of the Program, including, for purposes of clarity any new file -in Source Code form that contains any contents of the Program. Modified -Works shall not include works that contain only declarations, -interfaces, types, classes, structures, or files of the Program solely -in each case in order to link to, bind by name, or subclass the Program -or Modified Works thereof. - -"Distribute" means the acts of a) distributing or b) making available -in any manner that enables the transfer of a copy. - -"Source Code" means the form of a Program preferred for making -modifications, including but not limited to software source code, -documentation source, and configuration files. - -"Secondary License" means either the GNU General Public License, -Version 2.0, or any later versions of that license, including any -exceptions or additional permissions as identified by the initial -Contributor. - -2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. 
- - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. - Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. 
- - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - -3. REQUIREMENTS - -3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of this section 3. 
- -3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - -3.3 Contributors may not remove or alter any copyright, patent, -trademark, attribution notices, disclaimers of warranty, or limitations -of liability ("notices") contained within the Program from any copy of -the Program which they Distribute, provided that Contributors may add -their own appropriate notices. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities -with respect to end users, business partners and the like. While this -license is intended to facilitate the commercial use of the Program, -the Contributor who includes the Program in a commercial product -offering should do so in a manner which does not create potential -liability for other Contributors. Therefore, if a Contributor includes -the Program in a commercial product offering, such Contributor -("Commercial Contributor") hereby agrees to defend and indemnify every -other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits -and other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such -Commercial Contributor in connection with its distribution of the Program -in a commercial product offering. The obligations in this section do not -apply to any claims or Losses relating to any actual or alleged -intellectual property infringement. 
In order to qualify, an Indemnified -Contributor must: a) promptly notify the Commercial Contributor in -writing of such claim, and b) allow the Commercial Contributor to control, -and cooperate with the Commercial Contributor in, the defense and any -related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial -product offering, Product X. That Contributor is then a Commercial -Contributor. If that Commercial Contributor then makes performance -claims, or offers warranties related to Product X, those performance -claims and warranties are such Commercial Contributor's responsibility -alone. Under this section, the Commercial Contributor would have to -defend claims against the other Contributors related to those performance -claims and warranties, and if a court requires any other Contributor to -pay any damages as a result, the Commercial Contributor must pay -those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR -PURPOSE. Each Recipient is solely responsible for determining the -appropriateness of using and distributing the Program and assumes all -risks associated with its exercise of rights under this Agreement, -including but not limited to the risks and costs of program errors, -compliance with applicable laws, damage to or loss of data, programs -or equipment, and unavailability or interruption of operations. - -6. 
DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS -SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of -the remainder of the terms of this Agreement, and without further -action by the parties hereto, such provision shall be reformed to the -minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity -(including a cross-claim or counterclaim in a lawsuit) alleging that the -Program itself (excluding combinations of the Program with other software -or hardware) infringes such Recipient's patent(s), then such Recipient's -rights granted under Section 2(b) shall terminate as of the date such -litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it -fails to comply with any of the material terms or conditions of this -Agreement and does not cure such failure in a reasonable period of -time after becoming aware of such noncompliance. If all Recipient's -rights under this Agreement terminate, Recipient agrees to cease use -and distribution of the Program as soon as reasonably practicable. -However, Recipient's obligations under this Agreement and any licenses -granted by Recipient relating to the Program shall continue and survive. 
- -Everyone is permitted to copy and distribute copies of this Agreement, -but in order to avoid inconsistency the Agreement is copyrighted and -may only be modified in the following manner. The Agreement Steward -reserves the right to publish new versions (including revisions) of -this Agreement from time to time. No one other than the Agreement -Steward has the right to modify this Agreement. The Eclipse Foundation -is the initial Agreement Steward. The Eclipse Foundation may assign the -responsibility to serve as the Agreement Steward to a suitable separate -entity. Each new version of the Agreement will be given a distinguishing -version number. The Program (including Contributions) may always be -Distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to Distribute the Program (including its -Contributions) under the new version. - -Except as expressly stated in Sections 2(a) and 2(b) above, Recipient -receives no rights or licenses to the intellectual property of any -Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. All rights in the Program not expressly granted -under this Agreement are reserved. Nothing in this Agreement is intended -to be enforceable by any entity that is not a Contributor or Recipient. -No third-party beneficiary rights are created under this Agreement. - -Exhibit A - Form of Secondary Licenses Notice - -"This Source Code may also be made available under the following -Secondary Licenses when the conditions for such availability set forth -in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), -version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. 
- - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/WorkspacePodContribution.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/WorkspacePodContribution.go deleted file mode 100644 index c85595ff4ac..00000000000 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/WorkspacePodContribution.go +++ /dev/null @@ -1,31 +0,0 @@ -package v1alpha2 - -import ( - corev1 "k8s.io/api/core/v1" -) - -type WorkspacePodContributions struct { - // +optional - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` - // +patchMergeKey=name - // +patchStrategy=merge - InitContainers []corev1.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` - // +patchMergeKey=name - // +patchStrategy=merge - Containers []corev1.Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by the workspace Pod. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` - // List of workspace-wide environment variables to set in all containers of the workspace POD. 
- // +optional - // +patchMergeKey=name - // +patchStrategy=merge - CommonEnv []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` -} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go index 84d70e40e27..e99a2dfa9b2 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go @@ -7,36 +7,37 @@ import ( // CommandType describes the type of command. // Only one of the following command type may be specified. -// +kubebuilder:validation:Enum=Exec;Apply;VscodeTask;VscodeLaunch;Composite;Custom +// +kubebuilder:validation:Enum=Exec;Apply;Composite;Custom type CommandType string const ( - ExecCommandType CommandType = "Exec" - ApplyCommandType CommandType = "Apply" - VscodeTaskCommandType CommandType = "VscodeTask" - VscodeLaunchCommandType CommandType = "VscodeLaunch" - CompositeCommandType CommandType = "Composite" - CustomCommandType CommandType = "Custom" + ExecCommandType CommandType = "Exec" + ApplyCommandType CommandType = "Apply" + CompositeCommandType CommandType = "Composite" + CustomCommandType CommandType = "Custom" ) // CommandGroupKind describes the kind of command group. 
-// +kubebuilder:validation:Enum=build;run;test;debug +// +kubebuilder:validation:Enum=build;run;test;debug;deploy type CommandGroupKind string const ( - BuildCommandGroupKind CommandGroupKind = "build" - RunCommandGroupKind CommandGroupKind = "run" - TestCommandGroupKind CommandGroupKind = "test" - DebugCommandGroupKind CommandGroupKind = "debug" + BuildCommandGroupKind CommandGroupKind = "build" + RunCommandGroupKind CommandGroupKind = "run" + TestCommandGroupKind CommandGroupKind = "test" + DebugCommandGroupKind CommandGroupKind = "debug" + DeployCommandGroupKind CommandGroupKind = "deploy" ) +// +devfile:getter:generate type CommandGroup struct { // Kind of group the command is part of Kind CommandGroupKind `json:"kind"` // +optional // Identifies the default command for a given group kind - IsDefault bool `json:"isDefault,omitempty"` + // +devfile:default:value=false + IsDefault *bool `json:"isDefault,omitempty"` } type BaseCommand struct { @@ -63,13 +64,16 @@ type Command struct { Id string `json:"id"` // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` CommandUnion `json:",inline"` } // +union type CommandUnion struct { - // Type of workspace command + // Type of devworkspace command // +unionDiscriminator // +optional CommandType CommandType `json:"commandType,omitempty"` @@ -79,27 +83,19 @@ type CommandUnion struct { Exec *ExecCommand `json:"exec,omitempty"` // Command that consists in applying a given component definition, - // typically bound to a workspace event. + // typically bound to a devworkspace event. 
// // For example, when an `apply` command is bound to a `preStart` event, // and references a `container` component, it will start the container as a - // K8S initContainer in the workspace POD, unless the component has its + // K8S initContainer in the devworkspace POD, unless the component has its // `dedicatedPod` field set to `true`. // // When no `apply` command exist for a given component, - // it is assumed the component will be applied at workspace start + // it is assumed the component will be applied at devworkspace start // by default. // +optional Apply *ApplyCommand `json:"apply,omitempty"` - // Command providing the definition of a VsCode Task - // +optional - VscodeTask *VscodeConfigurationCommand `json:"vscodeTask,omitempty"` - - // Command providing the definition of a VsCode launch action - // +optional - VscodeLaunch *VscodeConfigurationCommand `json:"vscodeLaunch,omitempty"` - // Composite command that allows executing several sub-commands // either sequentially or concurrently // +optional @@ -113,6 +109,7 @@ type CommandUnion struct { Custom *CustomCommand `json:"custom,omitempty"` } +// +devfile:getter:generate type ExecCommand struct { LabeledCommand `json:",inline"` @@ -151,7 +148,8 @@ type ExecCommand struct { // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. 
// // Default value is `false` - HotReloadCapable bool `json:"hotReloadCapable,omitempty"` + // +devfile:default:value=false + HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` } type ApplyCommand struct { @@ -162,6 +160,7 @@ type ApplyCommand struct { Component string `json:"component"` } +// +devfile:getter:generate type CompositeCommand struct { LabeledCommand `json:",inline"` @@ -170,41 +169,8 @@ type CompositeCommand struct { // Indicates if the sub-commands should be executed concurrently // +optional - Parallel bool `json:"parallel,omitempty"` -} - -// VscodeConfigurationCommandLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -// +kubebuilder:validation:Enum=Uri;Inlined -type VscodeConfigurationCommandLocationType string - -const ( - UriVscodeConfigurationCommandLocationType VscodeConfigurationCommandLocationType = "Uri" - InlinedVscodeConfigurationCommandLocationType VscodeConfigurationCommandLocationType = "Inlined" -) - -// +union -type VscodeConfigurationCommandLocation struct { - // Type of Vscode configuration command location - // + - // +unionDiscriminator - // +optional - LocationType VscodeConfigurationCommandLocationType `json:"locationType,omitempty"` - - // Location as an absolute of relative URI - // the VsCode configuration will be fetched from - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined content of the VsCode configuration - // +optional - Inlined string `json:"inlined,omitempty"` -} - -type VscodeConfigurationCommand struct { - BaseCommand `json:",inline"` - VscodeConfigurationCommandLocation `json:",inline"` + // +devfile:default:value=false + Parallel *bool `json:"parallel,omitempty"` } type CustomCommand struct { diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/containerComponent.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go similarity index 
86% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/containerComponent.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go index 8e7c7eb91fc..08ded6bb223 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/containerComponent.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go @@ -1,12 +1,13 @@ package v1alpha2 -// Component that allows the developer to add a configured container into his workspace +// Component that allows the developer to add a configured container into their devworkspace type ContainerComponent struct { BaseComponent `json:",inline"` Container `json:",inline"` Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` } +// +devfile:getter:generate type Container struct { Image string `json:"image"` @@ -69,7 +70,20 @@ type Container struct { // // Default value is `false` // +optional - DedicatedPod bool `json:"dedicatedPod,omitempty"` + // +devfile:default:value=false + DedicatedPod *bool `json:"dedicatedPod,omitempty"` +} + +//GetMountSources returns the value of the boolean property. If it's unset, the default value is true for all component types except plugins and components that set `dedicatedPod` to true. +func (in *Container) GetMountSources() bool { + if in.MountSources != nil { + return *in.MountSources + } else { + if in.GetDedicatedPod() { + return false + } + return true + } } type EnvVar struct { diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image.go new file mode 100644 index 00000000000..c6e7e6b7777 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image.go @@ -0,0 +1,38 @@ +package v1alpha2 + +// ImageType describes the type of image. +// Only one of the following image type may be specified. 
+// +kubebuilder:validation:Enum=Dockerfile +type ImageType string + +const ( + DockerfileImageType ImageType = "Dockerfile" +) + +type BaseImage struct { +} + +// Component that allows the developer to build a runtime image for outerloop +type ImageComponent struct { + BaseComponent `json:",inline"` + Image `json:",inline"` +} + +type Image struct { + // Name of the image for the resulting outerloop build + ImageName string `json:"imageName"` + ImageUnion `json:",inline"` +} + +// +union +type ImageUnion struct { + // Type of image + // + // +unionDiscriminator + // +optional + ImageType ImageType `json:"imageType,omitempty"` + + // Allows specifying dockerfile type build + // +optional + Dockerfile *DockerfileImage `json:"dockerfile,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image_dockerfile.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image_dockerfile.go new file mode 100644 index 00000000000..a200ba1378d --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image_dockerfile.go @@ -0,0 +1,83 @@ +package v1alpha2 + +// DockerfileSrcType describes the type of +// the src for the Dockerfile outerloop build. +// Only one of the following location type may be specified. 
+// +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git +type DockerfileSrcType string + +const ( + UriLikeDockerfileSrcType DockerfileSrcType = "Uri" + DevfileRegistryLikeDockerfileSrcType DockerfileSrcType = "DevfileRegistry" + GitLikeDockerfileSrcType DockerfileSrcType = "Git" +) + +// Dockerfile Image type to specify the outerloop build using a Dockerfile +type DockerfileImage struct { + BaseImage `json:",inline"` + DockerfileSrc `json:",inline"` + Dockerfile `json:",inline"` +} + +// +union +type DockerfileSrc struct { + // Type of Dockerfile src + // + + // +unionDiscriminator + // +optional + SrcType DockerfileSrcType `json:"srcType,omitempty"` + + // URI Reference of a Dockerfile. + // It can be a full URL or a relative URI from the current devfile as the base URI. + // +optional + Uri string `json:"uri,omitempty"` + + // Dockerfile's Devfile Registry source + // +optional + DevfileRegistry *DockerfileDevfileRegistrySource `json:"devfileRegistry,omitempty"` + + // Dockerfile's Git source + // +optional + Git *DockerfileGitProjectSource `json:"git,omitempty"` +} + +// +devfile:getter:generate +type Dockerfile struct { + // Path of source directory to establish build context. Defaults to ${PROJECT_ROOT} in the container + // +optional + BuildContext string `json:"buildContext,omitempty"` + + // The arguments to supply to the dockerfile build. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Specify if a privileged builder pod is required. + // + // Default value is `false` + // +optional + // +devfile:default:value=false + RootRequired *bool `json:"rootRequired,omitempty"` +} + +type DockerfileDevfileRegistrySource struct { + // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry + // required for the Dockerfile build will be downloaded for building the image. + Id string `json:"id"` + + // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. 
+ // To ensure the Dockerfile gets resolved consistently in different environments, + // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. + // +optional + RegistryUrl string `json:"registryUrl,omitempty"` +} + +type DockerfileGitProjectSource struct { + // Git src for the Dockerfile build. The src required for the Dockerfile build will need to be + // cloned for building the image. + GitProjectSource `json:",inline"` + + // Location of the Dockerfile in the Git repository when using git as Dockerfile src. + // Defaults to Dockerfile. + // +optional + FileLocation string `json:"fileLocation,omitempty"` +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/kubernetesLikeComponent.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go similarity index 96% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/kubernetesLikeComponent.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go index 02a18ed71e5..483735c0fce 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/kubernetesLikeComponent.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go @@ -34,12 +34,12 @@ type K8sLikeComponent struct { Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` } -// Component that allows partly importing Kubernetes resources into the workspace POD +// Component that allows partly importing Kubernetes resources into the devworkspace POD type KubernetesComponent struct { K8sLikeComponent `json:",inline"` } -// Component that allows partly importing Openshift resources into the workspace POD +// Component that allows partly importing Openshift resources into the devworkspace POD type OpenshiftComponent struct { K8sLikeComponent `json:",inline"` } diff --git 
a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/pluginComponents.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go similarity index 100% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/pluginComponents.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/volumeComponent.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go similarity index 57% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/volumeComponent.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go index 06923819812..c3bd6928039 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/volumeComponent.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go @@ -1,14 +1,21 @@ package v1alpha2 -// Component that allows the developer to declare and configure a volume into his workspace +// Component that allows the developer to declare and configure a volume into their devworkspace type VolumeComponent struct { BaseComponent `json:",inline"` Volume `json:",inline"` } // Volume that should be mounted to a component container +// +devfile:getter:generate type Volume struct { // +optional // Size of the volume Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. 
Defaults + // to false + // +devfile:default:value=false + Ephemeral *bool `json:"ephemeral,omitempty"` } diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go index 20b4b682baa..9356aa65522 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go @@ -7,7 +7,7 @@ import ( // ComponentType describes the type of component. // Only one of the following component type may be specified. -// +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Plugin;Custom +// +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image;Plugin;Custom type ComponentType string const ( @@ -16,11 +16,12 @@ const ( OpenshiftComponentType ComponentType = "Openshift" PluginComponentType ComponentType = "Plugin" VolumeComponentType ComponentType = "Volume" + ImageComponentType ComponentType = "Image" CustomComponentType ComponentType = "Custom" ) -// Workspace component: Anything that will bring additional features / tooling / behaviour / context -// to the workspace, in order to make working in it easier. +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. type BaseComponent struct { } @@ -34,6 +35,9 @@ type Component struct { Name string `json:"name"` // Map of implementation-dependant free-form YAML attributes. 
// +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` ComponentUnion `json:",inline"` } @@ -46,18 +50,18 @@ type ComponentUnion struct { // +optional ComponentType ComponentType `json:"componentType,omitempty"` - // Allows adding and configuring workspace-related containers + // Allows adding and configuring devworkspace-related containers // +optional Container *ContainerComponent `json:"container,omitempty"` - // Allows importing into the workspace the Kubernetes resources + // Allows importing into the devworkspace the Kubernetes resources // defined in a given manifest. For example this allows reusing the Kubernetes // definitions used to deploy some runtime components in production. // // +optional Kubernetes *KubernetesComponent `json:"kubernetes,omitempty"` - // Allows importing into the workspace the OpenShift resources + // Allows importing into the devworkspace the OpenShift resources // defined in a given manifest. For example this allows reusing the OpenShift // definitions used to deploy some runtime components in production. // @@ -69,6 +73,10 @@ type ComponentUnion struct { // +optional Volume *VolumeComponent `json:"volume,omitempty"` + // Allows specifying the definition of an image for outer loop builds + // +optional + Image *ImageComponent `json:"image,omitempty"` + // Allows importing a plugin. 
// // Plugins are mainly imported devfiles that contribute components, commands diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go index 2d4681521fd..050399a40b3 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go @@ -4,9 +4,9 @@ import ( "github.com/devfile/api/v2/pkg/devfile" ) -// Devfile describes the structure of a cloud-native workspace and development environment. +// Devfile describes the structure of a cloud-native devworkspace and development environment. // +k8s:deepcopy-gen=false -// +devfile:jsonschema:generate:omitCustomUnionMembers=true +// +devfile:jsonschema:generate:omitCustomUnionMembers=true,omitPluginUnionMembers=true type Devfile struct { devfile.DevfileHeader `json:",inline"` diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go index 03055b8b670..c7fe603c50b 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go @@ -14,34 +14,34 @@ type DevWorkspaceSpec struct { // DevWorkspaceStatus defines the observed state of DevWorkspace type DevWorkspaceStatus struct { - // Id of the workspace - WorkspaceId string `json:"workspaceId"` - // URL at which the Worksace Editor can be joined - IdeUrl string `json:"ideUrl,omitempty"` - Phase WorkspacePhase `json:"phase,omitempty"` + // Id of the DevWorkspace + DevWorkspaceId string `json:"devworkspaceId"` + // Main URL for this DevWorkspace + MainUrl string `json:"mainUrl,omitempty"` + Phase DevWorkspacePhase `json:"phase,omitempty"` // Conditions represent the latest available observations of an object's state - Conditions 
[]WorkspaceCondition `json:"conditions,omitempty"` + Conditions []DevWorkspaceCondition `json:"conditions,omitempty"` // Message is a short user-readable message giving additional information // about an object's state Message string `json:"message,omitempty"` } -type WorkspacePhase string +type DevWorkspacePhase string -// Valid workspace Statuses +// Valid devworkspace Statuses const ( - WorkspaceStatusStarting WorkspacePhase = "Starting" - WorkspaceStatusRunning WorkspacePhase = "Running" - WorkspaceStatusStopped WorkspacePhase = "Stopped" - WorkspaceStatusStopping WorkspacePhase = "Stopping" - WorkspaceStatusFailed WorkspacePhase = "Failed" - WorkspaceStatusError WorkspacePhase = "Error" + DevWorkspaceStatusStarting DevWorkspacePhase = "Starting" + DevWorkspaceStatusRunning DevWorkspacePhase = "Running" + DevWorkspaceStatusStopped DevWorkspacePhase = "Stopped" + DevWorkspaceStatusStopping DevWorkspacePhase = "Stopping" + DevWorkspaceStatusFailed DevWorkspacePhase = "Failed" + DevWorkspaceStatusError DevWorkspacePhase = "Error" ) -// WorkspaceCondition contains details for the current condition of this workspace. -type WorkspaceCondition struct { +// DevWorkspaceCondition contains details for the current condition of this devworkspace. +type DevWorkspaceCondition struct { // Type is the type of the condition. - Type WorkspaceConditionType `json:"type"` + Type DevWorkspaceConditionType `json:"type"` // Phase is the status of the condition. // Can be True, False, Unknown. 
Status corev1.ConditionStatus `json:"status"` @@ -53,16 +53,16 @@ type WorkspaceCondition struct { Message string `json:"message,omitempty"` } -// Types of conditions reported by workspace -type WorkspaceConditionType string +// Types of conditions reported by devworkspace +type DevWorkspaceConditionType string const ( - WorkspaceComponentsReady WorkspaceConditionType = "ComponentsReady" - WorkspaceRoutingReady WorkspaceConditionType = "RoutingReady" - WorkspaceServiceAccountReady WorkspaceConditionType = "ServiceAccountReady" - WorkspaceReady WorkspaceConditionType = "Ready" - WorkspaceFailedStart WorkspaceConditionType = "FailedStart" - WorkspaceError WorkspaceConditionType = "Error" + DevWorkspaceComponentsReady DevWorkspaceConditionType = "ComponentsReady" + DevWorkspaceRoutingReady DevWorkspaceConditionType = "RoutingReady" + DevWorkspaceServiceAccountReady DevWorkspaceConditionType = "ServiceAccountReady" + DevWorkspaceReady DevWorkspaceConditionType = "Ready" + DevWorkspaceFailedStart DevWorkspaceConditionType = "FailedStart" + DevWorkspaceError DevWorkspaceConditionType = "Error" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -70,9 +70,9 @@ const ( // DevWorkspace is the Schema for the devworkspaces API // +kubebuilder:subresource:status // +kubebuilder:resource:path=devworkspaces,scope=Namespaced,shortName=dw -// +kubebuilder:printcolumn:name="Workspace ID",type="string",JSONPath=".status.workspaceId",description="The workspace's unique id" -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current workspace startup phase" -// +kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional information about the workspace" +// +kubebuilder:printcolumn:name="DevWorkspace ID",type="string",JSONPath=".status.devworkspaceId",description="The devworkspace's unique id" +// 
+kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current devworkspace startup phase" +// +kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional information about the devworkspace" // +devfile:jsonschema:generate // +kubebuilder:storageversion type DevWorkspace struct { diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspaceTemplateSpec.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go similarity index 53% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspaceTemplateSpec.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go index 96d10a508e5..f398cc9dddd 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspaceTemplateSpec.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go @@ -1,9 +1,11 @@ package v1alpha2 -// Structure of the workspace. This is also the specification of a workspace template. +import attributes "github.com/devfile/api/v2/pkg/attributes" + +// Structure of the devworkspace. This is also the specification of a devworkspace template. // +devfile:jsonschema:generate type DevWorkspaceTemplateSpec struct { - // Parent workspace template + // Parent devworkspace template // +optional Parent *Parent `json:"parent,omitempty"` @@ -12,7 +14,31 @@ type DevWorkspaceTemplateSpec struct { // +devfile:overrides:generate type DevWorkspaceTemplateSpecContent struct { - // List of the workspace components, such as editor and plugins, + // Map of key-value variables used for string replacement in the devfile. Values can be referenced via {{variable-key}} + // to replace the corresponding value in string fields in the devfile. Replacement cannot be used for + // + // - schemaVersion, metadata, parent source + // + // - element identifiers, e.g. 
command id, component name, endpoint name, project name + // + // - references to identifiers, e.g. in events, a command's component, container's volume mount name + // + // - string enums, e.g. command group kind, endpoint exposure + // +optional + // +patchStrategy=merge + // +devfile:overrides:include:omitInPlugin=true,description=Overrides of variables encapsulated in a parent devfile. + Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"` + + // Map of implementation-dependant free-form YAML attributes. + // +optional + // +patchStrategy=merge + // +devfile:overrides:include:omitInPlugin=true,description=Overrides of attributes encapsulated in a parent devfile. + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"` + + // List of the devworkspace components, such as editor and plugins, // user-provided containers, or other types of components // +optional // +patchMergeKey=name @@ -21,7 +47,7 @@ type DevWorkspaceTemplateSpecContent struct { // +devfile:toplevellist Components []Component `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - // Projects worked on in the workspace, containing names and sources locations + // Projects worked on in the devworkspace, containing names and sources locations // +optional // +patchMergeKey=name // +patchStrategy=merge @@ -37,7 +63,7 @@ type DevWorkspaceTemplateSpecContent struct { // +devfile:toplevellist StarterProjects []StarterProject `json:"starterProjects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - // Predefined, ready-to-use, workspace-related commands + // Predefined, ready-to-use, devworkspace-related commands // +optional // +patchMergeKey=id // +patchStrategy=merge diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go 
b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go index 0a94724e44d..c16dcf897a7 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go @@ -2,5 +2,5 @@ // +k8s:deepcopy-gen=package,register // +k8s:openapi-gen=true // +groupName=workspace.devfile.io -// +devfile:jsonschema:version=2.1.0-alpha +// +devfile:jsonschema:version=2.2.0-alpha package v1alpha2 diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go index d54bb2c9169..3cf10d8ea03 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go @@ -1,8 +1,6 @@ package v1alpha2 -import ( - attributes "github.com/devfile/api/v2/pkg/attributes" -) +import "github.com/devfile/api/v2/pkg/attributes" // EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. // Only one of the following protocols may be specified: http, ws, tcp, udp. @@ -37,15 +35,16 @@ const ( // Endpoint will be exposed on the public network, typically through // a K8S ingress or an OpenShift route PublicEndpointExposure EndpointExposure = "public" - // Endpoint will be exposed internally outside of the main workspace POD, + // Endpoint will be exposed internally outside of the main devworkspace POD, // typically by K8S services, to be consumed by other elements running // on the same cloud internal network. InternalEndpointExposure EndpointExposure = "internal" // Endpoint will not be exposed and will only be accessible - // inside the main workspace POD, on a local address. + // inside the main devworkspace POD, on a local address. 
NoneEndpointExposure EndpointExposure = "none" ) +// +devfile:getter:generate type Endpoint struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MaxLength=63 @@ -58,12 +57,12 @@ type Endpoint struct { // - `public` means that the endpoint will be exposed on the public network, typically through // a K8S ingress or an OpenShift route. // - // - `internal` means that the endpoint will be exposed internally outside of the main workspace POD, + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, // typically by K8S services, to be consumed by other elements running // on the same cloud internal network. // // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main workspace POD, on a local address. + // inside the main devworkspace POD, on a local address. // // Default value is `public` // +optional @@ -94,7 +93,8 @@ type Endpoint struct { // Describes whether the endpoint should be secured and protected by some // authentication process. This requires a protocol of `https` or `wss`. 
// +optional - Secure bool `json:"secure,omitempty"` + // +devfile:default:value=false + Secure *bool `json:"secure,omitempty"` // Path of the endpoint URL // +optional @@ -108,5 +108,8 @@ type Endpoint struct { // // - type: "terminal" / "ide" / "ide-dev", // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` } diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go index b6986ce3252..2a8bd91a584 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go @@ -1,26 +1,26 @@ package v1alpha2 type Events struct { - WorkspaceEvents `json:",inline"` + DevWorkspaceEvents `json:",inline"` } -type WorkspaceEvents struct { - // IDs of commands that should be executed before the workspace start. - // Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD. +type DevWorkspaceEvents struct { + // IDs of commands that should be executed before the devworkspace start. + // Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD. // +optional PreStart []string `json:"preStart,omitempty"` - // IDs of commands that should be executed after the workspace is completely started. + // IDs of commands that should be executed after the devworkspace is completely started. // In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. // This means that those commands are not triggered until the user opens the IDE in his browser. // +optional PostStart []string `json:"postStart,omitempty"` // +optional - // IDs of commands that should be executed before stopping the workspace. 
+ // IDs of commands that should be executed before stopping the devworkspace. PreStop []string `json:"preStop,omitempty"` // +optional - // IDs of commands that should be executed after stopping the workspace. + // IDs of commands that should be executed after stopping the devworkspace. PostStop []string `json:"postStop,omitempty"` } diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/importReference.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go similarity index 78% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/importReference.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go index a1b5bc88f9c..342b7975c19 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/importReference.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go @@ -21,7 +21,8 @@ type ImportReferenceUnion struct { // +optional ImportReferenceType ImportReferenceType `json:"importReferenceType,omitempty"` - // Uri of a Devfile yaml file + // URI Reference of a parent devfile YAML file. + // It can be a full URL or a relative URI with the current devfile as the base URI. // +optional Uri string `json:"uri,omitempty"` @@ -43,6 +44,10 @@ type KubernetesCustomResourceImportReference struct { type ImportReference struct { ImportReferenceUnion `json:",inline"` + + // Registry URL to pull the parent devfile from when using id in the parent reference. + // To ensure the parent devfile gets resolved consistently in different environments, + // it is recommended to always specify the `registryUrl` when `id` is used. 
// +optional RegistryUrl string `json:"registryUrl,omitempty"` } diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrideDirectives.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go similarity index 100% rename from vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrideDirectives.go rename to vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go index a111ac65016..9348d4d685f 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go @@ -13,16 +13,15 @@ type Project struct { // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` // Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. // +optional ClonePath string `json:"clonePath,omitempty"` - // Populate the project sparsely with selected directories. - // +optional - SparseCheckoutDirs []string `json:"sparseCheckoutDirs,omitempty"` - ProjectSource `json:",inline"` } @@ -34,6 +33,9 @@ type StarterProject struct { // Map of implementation-dependant free-form YAML attributes. 
// +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` // Description of a starter project @@ -51,12 +53,11 @@ type StarterProject struct { // Only one of the following project sources may be specified. // If none of the following policies is specified, the default one // is AllowConcurrent. -// +kubebuilder:validation:Enum=Git;Github;Zip;Custom +// +kubebuilder:validation:Enum=Git;Zip;Custom type ProjectSourceType string const ( GitProjectSourceType ProjectSourceType = "Git" - GitHubProjectSourceType ProjectSourceType = "Github" ZipProjectSourceType ProjectSourceType = "Zip" CustomProjectSourceType ProjectSourceType = "Custom" ) @@ -73,10 +74,6 @@ type ProjectSource struct { // +optional Git *GitProjectSource `json:"git,omitempty"` - // Project's GitHub source - // +optional - Github *GithubProjectSource `json:"github,omitempty"` - // Project's Zip source // +optional Zip *ZipProjectSource `json:"zip,omitempty"` @@ -112,7 +109,8 @@ type GitLikeProjectSource struct { // +optional CheckoutFrom *CheckoutFrom `json:"checkoutFrom,omitempty"` - // The remotes map which should be initialized in the git project. Must have at least one remote configured + // The remotes map which should be initialized in the git project. + // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. 
Remotes map[string]string `json:"remotes"` } @@ -129,7 +127,3 @@ type CheckoutFrom struct { type GitProjectSource struct { GitLikeProjectSource `json:",inline"` } - -type GithubProjectSource struct { - GitLikeProjectSource `json:",inline"` -} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go index 3ee9a3c32fe..ee9afdfbb7f 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go @@ -6,7 +6,6 @@ package v1alpha2 import ( "github.com/devfile/api/v2/pkg/attributes" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -80,7 +79,7 @@ func (in *BaseCommand) DeepCopyInto(out *BaseCommand) { if in.Group != nil { in, out := &in.Group, &out.Group *out = new(CommandGroup) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -100,7 +99,7 @@ func (in *BaseCommandParentOverride) DeepCopyInto(out *BaseCommandParentOverride if in.Group != nil { in, out := &in.Group, &out.Group *out = new(CommandGroupParentOverride) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -120,7 +119,7 @@ func (in *BaseCommandPluginOverride) DeepCopyInto(out *BaseCommandPluginOverride if in.Group != nil { in, out := &in.Group, &out.Group *out = new(CommandGroupPluginOverride) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -140,7 +139,7 @@ func (in *BaseCommandPluginOverrideParentOverride) DeepCopyInto(out *BaseCommand if in.Group != nil { in, out := &in.Group, &out.Group *out = new(CommandGroupPluginOverrideParentOverride) - **out = **in + (*in).DeepCopyInto(*out) } } @@ -214,6 +213,66 @@ func (in *BaseComponentPluginOverrideParentOverride) DeepCopy() *BaseComponentPl return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BaseImage) DeepCopyInto(out *BaseImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImage. +func (in *BaseImage) DeepCopy() *BaseImage { + if in == nil { + return nil + } + out := new(BaseImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseImageParentOverride) DeepCopyInto(out *BaseImageParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImageParentOverride. +func (in *BaseImageParentOverride) DeepCopy() *BaseImageParentOverride { + if in == nil { + return nil + } + out := new(BaseImageParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseImagePluginOverride) DeepCopyInto(out *BaseImagePluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImagePluginOverride. +func (in *BaseImagePluginOverride) DeepCopy() *BaseImagePluginOverride { + if in == nil { + return nil + } + out := new(BaseImagePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseImagePluginOverrideParentOverride) DeepCopyInto(out *BaseImagePluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImagePluginOverrideParentOverride. 
+func (in *BaseImagePluginOverrideParentOverride) DeepCopy() *BaseImagePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(BaseImagePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CheckoutFrom) DeepCopyInto(out *CheckoutFrom) { *out = *in @@ -244,6 +303,36 @@ func (in *CheckoutFromParentOverride) DeepCopy() *CheckoutFromParentOverride { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckoutFromPluginOverride) DeepCopyInto(out *CheckoutFromPluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFromPluginOverride. +func (in *CheckoutFromPluginOverride) DeepCopy() *CheckoutFromPluginOverride { + if in == nil { + return nil + } + out := new(CheckoutFromPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CheckoutFromPluginOverrideParentOverride) DeepCopyInto(out *CheckoutFromPluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFromPluginOverrideParentOverride. +func (in *CheckoutFromPluginOverrideParentOverride) DeepCopy() *CheckoutFromPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(CheckoutFromPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Command) DeepCopyInto(out *Command) { *out = *in @@ -270,6 +359,11 @@ func (in *Command) DeepCopy() *Command { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommandGroup) DeepCopyInto(out *CommandGroup) { *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroup. @@ -285,6 +379,11 @@ func (in *CommandGroup) DeepCopy() *CommandGroup { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommandGroupParentOverride) DeepCopyInto(out *CommandGroupParentOverride) { *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupParentOverride. @@ -300,6 +399,11 @@ func (in *CommandGroupParentOverride) DeepCopy() *CommandGroupParentOverride { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommandGroupPluginOverride) DeepCopyInto(out *CommandGroupPluginOverride) { *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupPluginOverride. @@ -315,6 +419,11 @@ func (in *CommandGroupPluginOverride) DeepCopy() *CommandGroupPluginOverride { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CommandGroupPluginOverrideParentOverride) DeepCopyInto(out *CommandGroupPluginOverrideParentOverride) { *out = *in + if in.IsDefault != nil { + in, out := &in.IsDefault, &out.IsDefault + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupPluginOverrideParentOverride. @@ -409,16 +518,6 @@ func (in *CommandUnion) DeepCopyInto(out *CommandUnion) { *out = new(ApplyCommand) (*in).DeepCopyInto(*out) } - if in.VscodeTask != nil { - in, out := &in.VscodeTask, &out.VscodeTask - *out = new(VscodeConfigurationCommand) - (*in).DeepCopyInto(*out) - } - if in.VscodeLaunch != nil { - in, out := &in.VscodeLaunch, &out.VscodeLaunch - *out = new(VscodeConfigurationCommand) - (*in).DeepCopyInto(*out) - } if in.Composite != nil { in, out := &in.Composite, &out.Composite *out = new(CompositeCommand) @@ -454,16 +553,6 @@ func (in *CommandUnionParentOverride) DeepCopyInto(out *CommandUnionParentOverri *out = new(ApplyCommandParentOverride) (*in).DeepCopyInto(*out) } - if in.VscodeTask != nil { - in, out := &in.VscodeTask, &out.VscodeTask - *out = new(VscodeConfigurationCommandParentOverride) - (*in).DeepCopyInto(*out) - } - if in.VscodeLaunch != nil { - in, out := &in.VscodeLaunch, &out.VscodeLaunch - *out = new(VscodeConfigurationCommandParentOverride) - (*in).DeepCopyInto(*out) - } if in.Composite != nil { in, out := &in.Composite, &out.Composite *out = new(CompositeCommandParentOverride) @@ -494,16 +583,6 @@ func (in *CommandUnionPluginOverride) DeepCopyInto(out *CommandUnionPluginOverri *out = new(ApplyCommandPluginOverride) (*in).DeepCopyInto(*out) } - if in.VscodeTask != nil { - in, out := &in.VscodeTask, &out.VscodeTask - *out = new(VscodeConfigurationCommandPluginOverride) - (*in).DeepCopyInto(*out) - } - if in.VscodeLaunch != nil { - in, out := &in.VscodeLaunch, &out.VscodeLaunch - *out = new(VscodeConfigurationCommandPluginOverride) - (*in).DeepCopyInto(*out) - } if in.Composite != 
nil { in, out := &in.Composite, &out.Composite *out = new(CompositeCommandPluginOverride) @@ -534,16 +613,6 @@ func (in *CommandUnionPluginOverrideParentOverride) DeepCopyInto(out *CommandUni *out = new(ApplyCommandPluginOverrideParentOverride) (*in).DeepCopyInto(*out) } - if in.VscodeTask != nil { - in, out := &in.VscodeTask, &out.VscodeTask - *out = new(VscodeConfigurationCommandPluginOverrideParentOverride) - (*in).DeepCopyInto(*out) - } - if in.VscodeLaunch != nil { - in, out := &in.VscodeLaunch, &out.VscodeLaunch - *out = new(VscodeConfigurationCommandPluginOverrideParentOverride) - (*in).DeepCopyInto(*out) - } if in.Composite != nil { in, out := &in.Composite, &out.Composite *out = new(CompositeCommandPluginOverrideParentOverride) @@ -591,6 +660,36 @@ func (in *CommonProjectSourceParentOverride) DeepCopy() *CommonProjectSourcePare return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonProjectSourcePluginOverride) DeepCopyInto(out *CommonProjectSourcePluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSourcePluginOverride. +func (in *CommonProjectSourcePluginOverride) DeepCopy() *CommonProjectSourcePluginOverride { + if in == nil { + return nil + } + out := new(CommonProjectSourcePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *CommonProjectSourcePluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSourcePluginOverrideParentOverride. 
+func (in *CommonProjectSourcePluginOverrideParentOverride) DeepCopy() *CommonProjectSourcePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(CommonProjectSourcePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Component) DeepCopyInto(out *Component) { *out = *in @@ -704,7 +803,12 @@ func (in *ComponentUnion) DeepCopyInto(out *ComponentUnion) { if in.Volume != nil { in, out := &in.Volume, &out.Volume *out = new(VolumeComponent) - **out = **in + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageComponent) + (*in).DeepCopyInto(*out) } if in.Plugin != nil { in, out := &in.Plugin, &out.Plugin @@ -749,7 +853,12 @@ func (in *ComponentUnionParentOverride) DeepCopyInto(out *ComponentUnionParentOv if in.Volume != nil { in, out := &in.Volume, &out.Volume *out = new(VolumeComponentParentOverride) - **out = **in + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageComponentParentOverride) + (*in).DeepCopyInto(*out) } if in.Plugin != nil { in, out := &in.Plugin, &out.Plugin @@ -789,7 +898,12 @@ func (in *ComponentUnionPluginOverride) DeepCopyInto(out *ComponentUnionPluginOv if in.Volume != nil { in, out := &in.Volume, &out.Volume *out = new(VolumeComponentPluginOverride) - **out = **in + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageComponentPluginOverride) + (*in).DeepCopyInto(*out) } } @@ -824,7 +938,12 @@ func (in *ComponentUnionPluginOverrideParentOverride) DeepCopyInto(out *Componen if in.Volume != nil { in, out := &in.Volume, &out.Volume *out = new(VolumeComponentPluginOverrideParentOverride) - **out = **in + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = 
new(ImageComponentPluginOverrideParentOverride) + (*in).DeepCopyInto(*out) } } @@ -847,6 +966,11 @@ func (in *CompositeCommand) DeepCopyInto(out *CompositeCommand) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Parallel != nil { + in, out := &in.Parallel, &out.Parallel + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommand. @@ -868,6 +992,11 @@ func (in *CompositeCommandParentOverride) DeepCopyInto(out *CompositeCommandPare *out = make([]string, len(*in)) copy(*out, *in) } + if in.Parallel != nil { + in, out := &in.Parallel, &out.Parallel + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandParentOverride. @@ -889,6 +1018,11 @@ func (in *CompositeCommandPluginOverride) DeepCopyInto(out *CompositeCommandPlug *out = make([]string, len(*in)) copy(*out, *in) } + if in.Parallel != nil { + in, out := &in.Parallel, &out.Parallel + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandPluginOverride. @@ -910,6 +1044,11 @@ func (in *CompositeCommandPluginOverrideParentOverride) DeepCopyInto(out *Compos *out = make([]string, len(*in)) copy(*out, *in) } + if in.Parallel != nil { + in, out := &in.Parallel, &out.Parallel + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandPluginOverrideParentOverride. @@ -950,6 +1089,11 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(bool) **out = **in } + if in.DedicatedPod != nil { + in, out := &in.DedicatedPod, &out.DedicatedPod + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. 
@@ -1086,6 +1230,11 @@ func (in *ContainerParentOverride) DeepCopyInto(out *ContainerParentOverride) { *out = new(bool) **out = **in } + if in.DedicatedPod != nil { + in, out := &in.DedicatedPod, &out.DedicatedPod + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParentOverride. @@ -1126,6 +1275,11 @@ func (in *ContainerPluginOverride) DeepCopyInto(out *ContainerPluginOverride) { *out = new(bool) **out = **in } + if in.DedicatedPod != nil { + in, out := &in.DedicatedPod, &out.DedicatedPod + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPluginOverride. @@ -1166,6 +1320,11 @@ func (in *ContainerPluginOverrideParentOverride) DeepCopyInto(out *ContainerPlug *out = new(bool) **out = **in } + if in.DedicatedPod != nil { + in, out := &in.DedicatedPod, &out.DedicatedPod + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPluginOverrideParentOverride. @@ -1254,6 +1413,57 @@ func (in *DevWorkspace) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DevWorkspaceCondition) DeepCopyInto(out *DevWorkspaceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceCondition. +func (in *DevWorkspaceCondition) DeepCopy() *DevWorkspaceCondition { + if in == nil { + return nil + } + out := new(DevWorkspaceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DevWorkspaceEvents) DeepCopyInto(out *DevWorkspaceEvents) { + *out = *in + if in.PreStart != nil { + in, out := &in.PreStart, &out.PreStart + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PostStop != nil { + in, out := &in.PostStop, &out.PostStop + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceEvents. +func (in *DevWorkspaceEvents) DeepCopy() *DevWorkspaceEvents { + if in == nil { + return nil + } + out := new(DevWorkspaceEvents) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DevWorkspaceList) DeepCopyInto(out *DevWorkspaceList) { *out = *in @@ -1307,7 +1517,7 @@ func (in *DevWorkspaceStatus) DeepCopyInto(out *DevWorkspaceStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]WorkspaceCondition, len(*in)) + *out = make([]DevWorkspaceCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1406,6 +1616,20 @@ func (in *DevWorkspaceTemplateSpec) DeepCopy() *DevWorkspaceTemplateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DevWorkspaceTemplateSpecContent) DeepCopyInto(out *DevWorkspaceTemplateSpecContent) { *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } if in.Components != nil { in, out := &in.Components, &out.Components *out = make([]Component, len(*in)) @@ -1452,62 +1676,473 @@ func (in *DevWorkspaceTemplateSpecContent) DeepCopy() *DevWorkspaceTemplateSpecC } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Endpoint) DeepCopyInto(out *Endpoint) { +func (in *Dockerfile) DeepCopyInto(out *Dockerfile) { *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RootRequired != nil { + in, out := &in.RootRequired, &out.RootRequired + *out = new(bool) + **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. -func (in *Endpoint) DeepCopy() *Endpoint { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dockerfile. +func (in *Dockerfile) DeepCopy() *Dockerfile { if in == nil { return nil } - out := new(Endpoint) + out := new(Dockerfile) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EndpointParentOverride) DeepCopyInto(out *EndpointParentOverride) { +func (in *DockerfileDevfileRegistrySource) DeepCopyInto(out *DockerfileDevfileRegistrySource) { *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParentOverride. -func (in *EndpointParentOverride) DeepCopy() *EndpointParentOverride { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySource. +func (in *DockerfileDevfileRegistrySource) DeepCopy() *DockerfileDevfileRegistrySource { if in == nil { return nil } - out := new(EndpointParentOverride) + out := new(DockerfileDevfileRegistrySource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPluginOverride) DeepCopyInto(out *EndpointPluginOverride) { +func (in *DockerfileDevfileRegistrySourceParentOverride) DeepCopyInto(out *DockerfileDevfileRegistrySourceParentOverride) { *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPluginOverride. +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySourceParentOverride. 
+func (in *DockerfileDevfileRegistrySourceParentOverride) DeepCopy() *DockerfileDevfileRegistrySourceParentOverride { + if in == nil { + return nil + } + out := new(DockerfileDevfileRegistrySourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileDevfileRegistrySourcePluginOverride) DeepCopyInto(out *DockerfileDevfileRegistrySourcePluginOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySourcePluginOverride. +func (in *DockerfileDevfileRegistrySourcePluginOverride) DeepCopy() *DockerfileDevfileRegistrySourcePluginOverride { + if in == nil { + return nil + } + out := new(DockerfileDevfileRegistrySourcePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileDevfileRegistrySourcePluginOverrideParentOverride) DeepCopyInto(out *DockerfileDevfileRegistrySourcePluginOverrideParentOverride) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySourcePluginOverrideParentOverride. +func (in *DockerfileDevfileRegistrySourcePluginOverrideParentOverride) DeepCopy() *DockerfileDevfileRegistrySourcePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(DockerfileDevfileRegistrySourcePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerfileGitProjectSource) DeepCopyInto(out *DockerfileGitProjectSource) { + *out = *in + in.GitProjectSource.DeepCopyInto(&out.GitProjectSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSource. +func (in *DockerfileGitProjectSource) DeepCopy() *DockerfileGitProjectSource { + if in == nil { + return nil + } + out := new(DockerfileGitProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileGitProjectSourceParentOverride) DeepCopyInto(out *DockerfileGitProjectSourceParentOverride) { + *out = *in + in.GitProjectSourceParentOverride.DeepCopyInto(&out.GitProjectSourceParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSourceParentOverride. +func (in *DockerfileGitProjectSourceParentOverride) DeepCopy() *DockerfileGitProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(DockerfileGitProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileGitProjectSourcePluginOverride) DeepCopyInto(out *DockerfileGitProjectSourcePluginOverride) { + *out = *in + in.GitProjectSourcePluginOverride.DeepCopyInto(&out.GitProjectSourcePluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSourcePluginOverride. +func (in *DockerfileGitProjectSourcePluginOverride) DeepCopy() *DockerfileGitProjectSourcePluginOverride { + if in == nil { + return nil + } + out := new(DockerfileGitProjectSourcePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerfileGitProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *DockerfileGitProjectSourcePluginOverrideParentOverride) { + *out = *in + in.GitProjectSourcePluginOverrideParentOverride.DeepCopyInto(&out.GitProjectSourcePluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSourcePluginOverrideParentOverride. +func (in *DockerfileGitProjectSourcePluginOverrideParentOverride) DeepCopy() *DockerfileGitProjectSourcePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(DockerfileGitProjectSourcePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileImage) DeepCopyInto(out *DockerfileImage) { + *out = *in + out.BaseImage = in.BaseImage + in.DockerfileSrc.DeepCopyInto(&out.DockerfileSrc) + in.Dockerfile.DeepCopyInto(&out.Dockerfile) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImage. +func (in *DockerfileImage) DeepCopy() *DockerfileImage { + if in == nil { + return nil + } + out := new(DockerfileImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileImageParentOverride) DeepCopyInto(out *DockerfileImageParentOverride) { + *out = *in + out.BaseImageParentOverride = in.BaseImageParentOverride + in.DockerfileSrcParentOverride.DeepCopyInto(&out.DockerfileSrcParentOverride) + in.DockerfileParentOverride.DeepCopyInto(&out.DockerfileParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImageParentOverride. 
+func (in *DockerfileImageParentOverride) DeepCopy() *DockerfileImageParentOverride { + if in == nil { + return nil + } + out := new(DockerfileImageParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileImagePluginOverride) DeepCopyInto(out *DockerfileImagePluginOverride) { + *out = *in + out.BaseImagePluginOverride = in.BaseImagePluginOverride + in.DockerfileSrcPluginOverride.DeepCopyInto(&out.DockerfileSrcPluginOverride) + in.DockerfilePluginOverride.DeepCopyInto(&out.DockerfilePluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImagePluginOverride. +func (in *DockerfileImagePluginOverride) DeepCopy() *DockerfileImagePluginOverride { + if in == nil { + return nil + } + out := new(DockerfileImagePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileImagePluginOverrideParentOverride) DeepCopyInto(out *DockerfileImagePluginOverrideParentOverride) { + *out = *in + out.BaseImagePluginOverrideParentOverride = in.BaseImagePluginOverrideParentOverride + in.DockerfileSrcPluginOverrideParentOverride.DeepCopyInto(&out.DockerfileSrcPluginOverrideParentOverride) + in.DockerfilePluginOverrideParentOverride.DeepCopyInto(&out.DockerfilePluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImagePluginOverrideParentOverride. +func (in *DockerfileImagePluginOverrideParentOverride) DeepCopy() *DockerfileImagePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(DockerfileImagePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DockerfileParentOverride) DeepCopyInto(out *DockerfileParentOverride) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RootRequired != nil { + in, out := &in.RootRequired, &out.RootRequired + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileParentOverride. +func (in *DockerfileParentOverride) DeepCopy() *DockerfileParentOverride { + if in == nil { + return nil + } + out := new(DockerfileParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfilePluginOverride) DeepCopyInto(out *DockerfilePluginOverride) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RootRequired != nil { + in, out := &in.RootRequired, &out.RootRequired + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfilePluginOverride. +func (in *DockerfilePluginOverride) DeepCopy() *DockerfilePluginOverride { + if in == nil { + return nil + } + out := new(DockerfilePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerfilePluginOverrideParentOverride) DeepCopyInto(out *DockerfilePluginOverrideParentOverride) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RootRequired != nil { + in, out := &in.RootRequired, &out.RootRequired + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfilePluginOverrideParentOverride. +func (in *DockerfilePluginOverrideParentOverride) DeepCopy() *DockerfilePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(DockerfilePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileSrc) DeepCopyInto(out *DockerfileSrc) { + *out = *in + if in.DevfileRegistry != nil { + in, out := &in.DevfileRegistry, &out.DevfileRegistry + *out = new(DockerfileDevfileRegistrySource) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(DockerfileGitProjectSource) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrc. +func (in *DockerfileSrc) DeepCopy() *DockerfileSrc { + if in == nil { + return nil + } + out := new(DockerfileSrc) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerfileSrcParentOverride) DeepCopyInto(out *DockerfileSrcParentOverride) { + *out = *in + if in.DevfileRegistry != nil { + in, out := &in.DevfileRegistry, &out.DevfileRegistry + *out = new(DockerfileDevfileRegistrySourceParentOverride) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(DockerfileGitProjectSourceParentOverride) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrcParentOverride. +func (in *DockerfileSrcParentOverride) DeepCopy() *DockerfileSrcParentOverride { + if in == nil { + return nil + } + out := new(DockerfileSrcParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerfileSrcPluginOverride) DeepCopyInto(out *DockerfileSrcPluginOverride) { + *out = *in + if in.DevfileRegistry != nil { + in, out := &in.DevfileRegistry, &out.DevfileRegistry + *out = new(DockerfileDevfileRegistrySourcePluginOverride) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(DockerfileGitProjectSourcePluginOverride) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrcPluginOverride. +func (in *DockerfileSrcPluginOverride) DeepCopy() *DockerfileSrcPluginOverride { + if in == nil { + return nil + } + out := new(DockerfileSrcPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerfileSrcPluginOverrideParentOverride) DeepCopyInto(out *DockerfileSrcPluginOverrideParentOverride) { + *out = *in + if in.DevfileRegistry != nil { + in, out := &in.DevfileRegistry, &out.DevfileRegistry + *out = new(DockerfileDevfileRegistrySourcePluginOverrideParentOverride) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(DockerfileGitProjectSourcePluginOverrideParentOverride) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrcPluginOverrideParentOverride. +func (in *DockerfileSrcPluginOverrideParentOverride) DeepCopy() *DockerfileSrcPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(DockerfileSrcPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Secure != nil { + in, out := &in.Secure, &out.Secure + *out = new(bool) + **out = **in + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointParentOverride) DeepCopyInto(out *EndpointParentOverride) { + *out = *in + if in.Secure != nil { + in, out := &in.Secure, &out.Secure + *out = new(bool) + **out = **in + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParentOverride. +func (in *EndpointParentOverride) DeepCopy() *EndpointParentOverride { + if in == nil { + return nil + } + out := new(EndpointParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPluginOverride) DeepCopyInto(out *EndpointPluginOverride) { + *out = *in + if in.Secure != nil { + in, out := &in.Secure, &out.Secure + *out = new(bool) + **out = **in + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPluginOverride. func (in *EndpointPluginOverride) DeepCopy() *EndpointPluginOverride { if in == nil { return nil @@ -1520,6 +2155,11 @@ func (in *EndpointPluginOverride) DeepCopy() *EndpointPluginOverride { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EndpointPluginOverrideParentOverride) DeepCopyInto(out *EndpointPluginOverrideParentOverride) { *out = *in + if in.Secure != nil { + in, out := &in.Secure, &out.Secure + *out = new(bool) + **out = **in + } if in.Attributes != nil { in, out := &in.Attributes, &out.Attributes *out = make(attributes.Attributes, len(*in)) @@ -1602,7 +2242,7 @@ func (in *EnvVarPluginOverrideParentOverride) DeepCopy() *EnvVarPluginOverridePa // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Events) DeepCopyInto(out *Events) { *out = *in - in.WorkspaceEvents.DeepCopyInto(&out.WorkspaceEvents) + in.DevWorkspaceEvents.DeepCopyInto(&out.DevWorkspaceEvents) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Events. @@ -1624,6 +2264,11 @@ func (in *ExecCommand) DeepCopyInto(out *ExecCommand) { *out = make([]EnvVar, len(*in)) copy(*out, *in) } + if in.HotReloadCapable != nil { + in, out := &in.HotReloadCapable, &out.HotReloadCapable + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommand. @@ -1645,6 +2290,11 @@ func (in *ExecCommandParentOverride) DeepCopyInto(out *ExecCommandParentOverride *out = make([]EnvVarParentOverride, len(*in)) copy(*out, *in) } + if in.HotReloadCapable != nil { + in, out := &in.HotReloadCapable, &out.HotReloadCapable + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandParentOverride. 
@@ -1666,6 +2316,11 @@ func (in *ExecCommandPluginOverride) DeepCopyInto(out *ExecCommandPluginOverride *out = make([]EnvVarPluginOverride, len(*in)) copy(*out, *in) } + if in.HotReloadCapable != nil { + in, out := &in.HotReloadCapable, &out.HotReloadCapable + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverride. @@ -1687,134 +2342,407 @@ func (in *ExecCommandPluginOverrideParentOverride) DeepCopyInto(out *ExecCommand *out = make([]EnvVarPluginOverrideParentOverride, len(*in)) copy(*out, *in) } + if in.HotReloadCapable != nil { + in, out := &in.HotReloadCapable, &out.HotReloadCapable + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverrideParentOverride. +func (in *ExecCommandPluginOverrideParentOverride) DeepCopy() *ExecCommandPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(ExecCommandPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLikeProjectSource) DeepCopyInto(out *GitLikeProjectSource) { + *out = *in + out.CommonProjectSource = in.CommonProjectSource + if in.CheckoutFrom != nil { + in, out := &in.CheckoutFrom, &out.CheckoutFrom + *out = new(CheckoutFrom) + **out = **in + } + if in.Remotes != nil { + in, out := &in.Remotes, &out.Remotes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSource. 
+func (in *GitLikeProjectSource) DeepCopy() *GitLikeProjectSource { + if in == nil { + return nil + } + out := new(GitLikeProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLikeProjectSourceParentOverride) DeepCopyInto(out *GitLikeProjectSourceParentOverride) { + *out = *in + out.CommonProjectSourceParentOverride = in.CommonProjectSourceParentOverride + if in.CheckoutFrom != nil { + in, out := &in.CheckoutFrom, &out.CheckoutFrom + *out = new(CheckoutFromParentOverride) + **out = **in + } + if in.Remotes != nil { + in, out := &in.Remotes, &out.Remotes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourceParentOverride. +func (in *GitLikeProjectSourceParentOverride) DeepCopy() *GitLikeProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(GitLikeProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLikeProjectSourcePluginOverride) DeepCopyInto(out *GitLikeProjectSourcePluginOverride) { + *out = *in + out.CommonProjectSourcePluginOverride = in.CommonProjectSourcePluginOverride + if in.CheckoutFrom != nil { + in, out := &in.CheckoutFrom, &out.CheckoutFrom + *out = new(CheckoutFromPluginOverride) + **out = **in + } + if in.Remotes != nil { + in, out := &in.Remotes, &out.Remotes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourcePluginOverride. 
+func (in *GitLikeProjectSourcePluginOverride) DeepCopy() *GitLikeProjectSourcePluginOverride { + if in == nil { + return nil + } + out := new(GitLikeProjectSourcePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLikeProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *GitLikeProjectSourcePluginOverrideParentOverride) { + *out = *in + out.CommonProjectSourcePluginOverrideParentOverride = in.CommonProjectSourcePluginOverrideParentOverride + if in.CheckoutFrom != nil { + in, out := &in.CheckoutFrom, &out.CheckoutFrom + *out = new(CheckoutFromPluginOverrideParentOverride) + **out = **in + } + if in.Remotes != nil { + in, out := &in.Remotes, &out.Remotes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourcePluginOverrideParentOverride. +func (in *GitLikeProjectSourcePluginOverrideParentOverride) DeepCopy() *GitLikeProjectSourcePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(GitLikeProjectSourcePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitProjectSource) DeepCopyInto(out *GitProjectSource) { + *out = *in + in.GitLikeProjectSource.DeepCopyInto(&out.GitLikeProjectSource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSource. +func (in *GitProjectSource) DeepCopy() *GitProjectSource { + if in == nil { + return nil + } + out := new(GitProjectSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitProjectSourceParentOverride) DeepCopyInto(out *GitProjectSourceParentOverride) { + *out = *in + in.GitLikeProjectSourceParentOverride.DeepCopyInto(&out.GitLikeProjectSourceParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourceParentOverride. +func (in *GitProjectSourceParentOverride) DeepCopy() *GitProjectSourceParentOverride { + if in == nil { + return nil + } + out := new(GitProjectSourceParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitProjectSourcePluginOverride) DeepCopyInto(out *GitProjectSourcePluginOverride) { + *out = *in + in.GitLikeProjectSourcePluginOverride.DeepCopyInto(&out.GitLikeProjectSourcePluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourcePluginOverride. +func (in *GitProjectSourcePluginOverride) DeepCopy() *GitProjectSourcePluginOverride { + if in == nil { + return nil + } + out := new(GitProjectSourcePluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *GitProjectSourcePluginOverrideParentOverride) { + *out = *in + in.GitLikeProjectSourcePluginOverrideParentOverride.DeepCopyInto(&out.GitLikeProjectSourcePluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourcePluginOverrideParentOverride. 
+func (in *GitProjectSourcePluginOverrideParentOverride) DeepCopy() *GitProjectSourcePluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(GitProjectSourcePluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + in.ImageUnion.DeepCopyInto(&out.ImageUnion) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageComponent) DeepCopyInto(out *ImageComponent) { + *out = *in + out.BaseComponent = in.BaseComponent + in.Image.DeepCopyInto(&out.Image) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponent. +func (in *ImageComponent) DeepCopy() *ImageComponent { + if in == nil { + return nil + } + out := new(ImageComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageComponentParentOverride) DeepCopyInto(out *ImageComponentParentOverride) { + *out = *in + out.BaseComponentParentOverride = in.BaseComponentParentOverride + in.ImageParentOverride.DeepCopyInto(&out.ImageParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponentParentOverride. 
+func (in *ImageComponentParentOverride) DeepCopy() *ImageComponentParentOverride { + if in == nil { + return nil + } + out := new(ImageComponentParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageComponentPluginOverride) DeepCopyInto(out *ImageComponentPluginOverride) { + *out = *in + out.BaseComponentPluginOverride = in.BaseComponentPluginOverride + in.ImagePluginOverride.DeepCopyInto(&out.ImagePluginOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponentPluginOverride. +func (in *ImageComponentPluginOverride) DeepCopy() *ImageComponentPluginOverride { + if in == nil { + return nil + } + out := new(ImageComponentPluginOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageComponentPluginOverrideParentOverride) DeepCopyInto(out *ImageComponentPluginOverrideParentOverride) { + *out = *in + out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride + in.ImagePluginOverrideParentOverride.DeepCopyInto(&out.ImagePluginOverrideParentOverride) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponentPluginOverrideParentOverride. +func (in *ImageComponentPluginOverrideParentOverride) DeepCopy() *ImageComponentPluginOverrideParentOverride { + if in == nil { + return nil + } + out := new(ImageComponentPluginOverrideParentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageParentOverride) DeepCopyInto(out *ImageParentOverride) { + *out = *in + in.ImageUnionParentOverride.DeepCopyInto(&out.ImageUnionParentOverride) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverrideParentOverride. -func (in *ExecCommandPluginOverrideParentOverride) DeepCopy() *ExecCommandPluginOverrideParentOverride { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParentOverride. +func (in *ImageParentOverride) DeepCopy() *ImageParentOverride { if in == nil { return nil } - out := new(ExecCommandPluginOverrideParentOverride) + out := new(ImageParentOverride) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitLikeProjectSource) DeepCopyInto(out *GitLikeProjectSource) { +func (in *ImagePluginOverride) DeepCopyInto(out *ImagePluginOverride) { *out = *in - out.CommonProjectSource = in.CommonProjectSource - if in.CheckoutFrom != nil { - in, out := &in.CheckoutFrom, &out.CheckoutFrom - *out = new(CheckoutFrom) - **out = **in - } - if in.Remotes != nil { - in, out := &in.Remotes, &out.Remotes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } + in.ImageUnionPluginOverride.DeepCopyInto(&out.ImageUnionPluginOverride) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSource. -func (in *GitLikeProjectSource) DeepCopy() *GitLikeProjectSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePluginOverride. +func (in *ImagePluginOverride) DeepCopy() *ImagePluginOverride { if in == nil { return nil } - out := new(GitLikeProjectSource) + out := new(ImagePluginOverride) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *GitLikeProjectSourceParentOverride) DeepCopyInto(out *GitLikeProjectSourceParentOverride) { +func (in *ImagePluginOverrideParentOverride) DeepCopyInto(out *ImagePluginOverrideParentOverride) { *out = *in - out.CommonProjectSourceParentOverride = in.CommonProjectSourceParentOverride - if in.CheckoutFrom != nil { - in, out := &in.CheckoutFrom, &out.CheckoutFrom - *out = new(CheckoutFromParentOverride) - **out = **in - } - if in.Remotes != nil { - in, out := &in.Remotes, &out.Remotes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } + in.ImageUnionPluginOverrideParentOverride.DeepCopyInto(&out.ImageUnionPluginOverrideParentOverride) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourceParentOverride. -func (in *GitLikeProjectSourceParentOverride) DeepCopy() *GitLikeProjectSourceParentOverride { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePluginOverrideParentOverride. +func (in *ImagePluginOverrideParentOverride) DeepCopy() *ImagePluginOverrideParentOverride { if in == nil { return nil } - out := new(GitLikeProjectSourceParentOverride) + out := new(ImagePluginOverrideParentOverride) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitProjectSource) DeepCopyInto(out *GitProjectSource) { +func (in *ImageUnion) DeepCopyInto(out *ImageUnion) { *out = *in - in.GitLikeProjectSource.DeepCopyInto(&out.GitLikeProjectSource) + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(DockerfileImage) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSource. 
-func (in *GitProjectSource) DeepCopy() *GitProjectSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnion. +func (in *ImageUnion) DeepCopy() *ImageUnion { if in == nil { return nil } - out := new(GitProjectSource) + out := new(ImageUnion) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitProjectSourceParentOverride) DeepCopyInto(out *GitProjectSourceParentOverride) { +func (in *ImageUnionParentOverride) DeepCopyInto(out *ImageUnionParentOverride) { *out = *in - in.GitLikeProjectSourceParentOverride.DeepCopyInto(&out.GitLikeProjectSourceParentOverride) + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(DockerfileImageParentOverride) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourceParentOverride. -func (in *GitProjectSourceParentOverride) DeepCopy() *GitProjectSourceParentOverride { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnionParentOverride. +func (in *ImageUnionParentOverride) DeepCopy() *ImageUnionParentOverride { if in == nil { return nil } - out := new(GitProjectSourceParentOverride) + out := new(ImageUnionParentOverride) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GithubProjectSource) DeepCopyInto(out *GithubProjectSource) { +func (in *ImageUnionPluginOverride) DeepCopyInto(out *ImageUnionPluginOverride) { *out = *in - in.GitLikeProjectSource.DeepCopyInto(&out.GitLikeProjectSource) + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(DockerfileImagePluginOverride) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubProjectSource. -func (in *GithubProjectSource) DeepCopy() *GithubProjectSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnionPluginOverride. +func (in *ImageUnionPluginOverride) DeepCopy() *ImageUnionPluginOverride { if in == nil { return nil } - out := new(GithubProjectSource) + out := new(ImageUnionPluginOverride) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GithubProjectSourceParentOverride) DeepCopyInto(out *GithubProjectSourceParentOverride) { +func (in *ImageUnionPluginOverrideParentOverride) DeepCopyInto(out *ImageUnionPluginOverrideParentOverride) { *out = *in - in.GitLikeProjectSourceParentOverride.DeepCopyInto(&out.GitLikeProjectSourceParentOverride) + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(DockerfileImagePluginOverrideParentOverride) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubProjectSourceParentOverride. -func (in *GithubProjectSourceParentOverride) DeepCopy() *GithubProjectSourceParentOverride { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnionPluginOverrideParentOverride. 
+func (in *ImageUnionPluginOverrideParentOverride) DeepCopy() *ImageUnionPluginOverrideParentOverride { if in == nil { return nil } - out := new(GithubProjectSourceParentOverride) + out := new(ImageUnionPluginOverrideParentOverride) in.DeepCopyInto(out) return out } @@ -2345,6 +3273,20 @@ func (in *Parent) DeepCopy() *Parent { func (in *ParentOverrides) DeepCopyInto(out *ParentOverrides) { *out = *in out.OverridesBase = in.OverridesBase + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(attributes.Attributes, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } if in.Components != nil { in, out := &in.Components, &out.Components *out = make([]ComponentParentOverride, len(*in)) @@ -2491,11 +3433,6 @@ func (in *Project) DeepCopyInto(out *Project) { (*out)[key] = *val.DeepCopy() } } - if in.SparseCheckoutDirs != nil { - in, out := &in.SparseCheckoutDirs, &out.SparseCheckoutDirs - *out = make([]string, len(*in)) - copy(*out, *in) - } in.ProjectSource.DeepCopyInto(&out.ProjectSource) } @@ -2519,11 +3456,6 @@ func (in *ProjectParentOverride) DeepCopyInto(out *ProjectParentOverride) { (*out)[key] = *val.DeepCopy() } } - if in.SparseCheckoutDirs != nil { - in, out := &in.SparseCheckoutDirs, &out.SparseCheckoutDirs - *out = make([]string, len(*in)) - copy(*out, *in) - } in.ProjectSourceParentOverride.DeepCopyInto(&out.ProjectSourceParentOverride) } @@ -2545,11 +3477,6 @@ func (in *ProjectSource) DeepCopyInto(out *ProjectSource) { *out = new(GitProjectSource) (*in).DeepCopyInto(*out) } - if in.Github != nil { - in, out := &in.Github, &out.Github - *out = new(GithubProjectSource) - (*in).DeepCopyInto(*out) - } if in.Zip != nil { in, out := &in.Zip, &out.Zip *out = new(ZipProjectSource) @@ -2580,11 +3507,6 @@ func (in *ProjectSourceParentOverride) 
DeepCopyInto(out *ProjectSourceParentOver *out = new(GitProjectSourceParentOverride) (*in).DeepCopyInto(*out) } - if in.Github != nil { - in, out := &in.Github, &out.Github - *out = new(GithubProjectSourceParentOverride) - (*in).DeepCopyInto(*out) - } if in.Zip != nil { in, out := &in.Zip, &out.Zip *out = new(ZipProjectSourceParentOverride) @@ -2651,6 +3573,11 @@ func (in *StarterProjectParentOverride) DeepCopy() *StarterProjectParentOverride // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. @@ -2667,7 +3594,7 @@ func (in *Volume) DeepCopy() *Volume { func (in *VolumeComponent) DeepCopyInto(out *VolumeComponent) { *out = *in out.BaseComponent = in.BaseComponent - out.Volume = in.Volume + in.Volume.DeepCopyInto(&out.Volume) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponent. @@ -2684,7 +3611,7 @@ func (in *VolumeComponent) DeepCopy() *VolumeComponent { func (in *VolumeComponentParentOverride) DeepCopyInto(out *VolumeComponentParentOverride) { *out = *in out.BaseComponentParentOverride = in.BaseComponentParentOverride - out.VolumeParentOverride = in.VolumeParentOverride + in.VolumeParentOverride.DeepCopyInto(&out.VolumeParentOverride) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentParentOverride. 
@@ -2701,7 +3628,7 @@ func (in *VolumeComponentParentOverride) DeepCopy() *VolumeComponentParentOverri func (in *VolumeComponentPluginOverride) DeepCopyInto(out *VolumeComponentPluginOverride) { *out = *in out.BaseComponentPluginOverride = in.BaseComponentPluginOverride - out.VolumePluginOverride = in.VolumePluginOverride + in.VolumePluginOverride.DeepCopyInto(&out.VolumePluginOverride) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentPluginOverride. @@ -2718,7 +3645,7 @@ func (in *VolumeComponentPluginOverride) DeepCopy() *VolumeComponentPluginOverri func (in *VolumeComponentPluginOverrideParentOverride) DeepCopyInto(out *VolumeComponentPluginOverrideParentOverride) { *out = *in out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride - out.VolumePluginOverrideParentOverride = in.VolumePluginOverrideParentOverride + in.VolumePluginOverrideParentOverride.DeepCopyInto(&out.VolumePluginOverrideParentOverride) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentPluginOverrideParentOverride. @@ -2794,6 +3721,11 @@ func (in *VolumeMountPluginOverrideParentOverride) DeepCopy() *VolumeMountPlugin // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeParentOverride) DeepCopyInto(out *VolumeParentOverride) { *out = *in + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeParentOverride. @@ -2809,6 +3741,11 @@ func (in *VolumeParentOverride) DeepCopy() *VolumeParentOverride { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumePluginOverride) DeepCopyInto(out *VolumePluginOverride) { *out = *in + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumePluginOverride. @@ -2824,6 +3761,11 @@ func (in *VolumePluginOverride) DeepCopy() *VolumePluginOverride { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumePluginOverrideParentOverride) DeepCopyInto(out *VolumePluginOverrideParentOverride) { *out = *in + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumePluginOverrideParentOverride. @@ -2836,233 +3778,6 @@ func (in *VolumePluginOverrideParentOverride) DeepCopy() *VolumePluginOverridePa return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommand) DeepCopyInto(out *VscodeConfigurationCommand) { - *out = *in - in.BaseCommand.DeepCopyInto(&out.BaseCommand) - out.VscodeConfigurationCommandLocation = in.VscodeConfigurationCommandLocation -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommand. -func (in *VscodeConfigurationCommand) DeepCopy() *VscodeConfigurationCommand { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommand) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandLocation) DeepCopyInto(out *VscodeConfigurationCommandLocation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandLocation. 
-func (in *VscodeConfigurationCommandLocation) DeepCopy() *VscodeConfigurationCommandLocation { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandLocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandLocationParentOverride) DeepCopyInto(out *VscodeConfigurationCommandLocationParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandLocationParentOverride. -func (in *VscodeConfigurationCommandLocationParentOverride) DeepCopy() *VscodeConfigurationCommandLocationParentOverride { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandLocationParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandLocationPluginOverride) DeepCopyInto(out *VscodeConfigurationCommandLocationPluginOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandLocationPluginOverride. -func (in *VscodeConfigurationCommandLocationPluginOverride) DeepCopy() *VscodeConfigurationCommandLocationPluginOverride { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandLocationPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandLocationPluginOverrideParentOverride) DeepCopyInto(out *VscodeConfigurationCommandLocationPluginOverrideParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandLocationPluginOverrideParentOverride. 
-func (in *VscodeConfigurationCommandLocationPluginOverrideParentOverride) DeepCopy() *VscodeConfigurationCommandLocationPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandLocationPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandParentOverride) DeepCopyInto(out *VscodeConfigurationCommandParentOverride) { - *out = *in - in.BaseCommandParentOverride.DeepCopyInto(&out.BaseCommandParentOverride) - out.VscodeConfigurationCommandLocationParentOverride = in.VscodeConfigurationCommandLocationParentOverride -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandParentOverride. -func (in *VscodeConfigurationCommandParentOverride) DeepCopy() *VscodeConfigurationCommandParentOverride { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandPluginOverride) DeepCopyInto(out *VscodeConfigurationCommandPluginOverride) { - *out = *in - in.BaseCommandPluginOverride.DeepCopyInto(&out.BaseCommandPluginOverride) - out.VscodeConfigurationCommandLocationPluginOverride = in.VscodeConfigurationCommandLocationPluginOverride -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandPluginOverride. 
-func (in *VscodeConfigurationCommandPluginOverride) DeepCopy() *VscodeConfigurationCommandPluginOverride { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VscodeConfigurationCommandPluginOverrideParentOverride) DeepCopyInto(out *VscodeConfigurationCommandPluginOverrideParentOverride) { - *out = *in - in.BaseCommandPluginOverrideParentOverride.DeepCopyInto(&out.BaseCommandPluginOverrideParentOverride) - out.VscodeConfigurationCommandLocationPluginOverrideParentOverride = in.VscodeConfigurationCommandLocationPluginOverrideParentOverride -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VscodeConfigurationCommandPluginOverrideParentOverride. -func (in *VscodeConfigurationCommandPluginOverrideParentOverride) DeepCopy() *VscodeConfigurationCommandPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(VscodeConfigurationCommandPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkspaceCondition) DeepCopyInto(out *WorkspaceCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceCondition. -func (in *WorkspaceCondition) DeepCopy() *WorkspaceCondition { - if in == nil { - return nil - } - out := new(WorkspaceCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkspaceEvents) DeepCopyInto(out *WorkspaceEvents) { - *out = *in - if in.PreStart != nil { - in, out := &in.PreStart, &out.PreStart - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PostStop != nil { - in, out := &in.PostStop, &out.PostStop - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceEvents. -func (in *WorkspaceEvents) DeepCopy() *WorkspaceEvents { - if in == nil { - return nil - } - out := new(WorkspaceEvents) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkspacePodContributions) DeepCopyInto(out *WorkspacePodContributions) { - *out = *in - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]v1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]v1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.CommonEnv != nil { - in, out := &in.CommonEnv, &out.CommonEnv - *out = make([]v1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, 
copying the receiver, creating a new WorkspacePodContributions. -func (in *WorkspacePodContributions) DeepCopy() *WorkspacePodContributions { - if in == nil { - return nil - } - out := new(WorkspacePodContributions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZipProjectSource) DeepCopyInto(out *ZipProjectSource) { *out = *in diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.getters.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.getters.go new file mode 100644 index 00000000000..4c7c36a30f3 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.getters.go @@ -0,0 +1,43 @@ +package v1alpha2 + +// GetIsDefault returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker +func (in *CommandGroup) GetIsDefault() bool { + return getBoolOrDefault(in.IsDefault, false) +} + +// GetHotReloadCapable returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker +func (in *ExecCommand) GetHotReloadCapable() bool { + return getBoolOrDefault(in.HotReloadCapable, false) +} + +// GetParallel returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker +func (in *CompositeCommand) GetParallel() bool { + return getBoolOrDefault(in.Parallel, false) +} + +// GetDedicatedPod returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker +func (in *Container) GetDedicatedPod() bool { + return getBoolOrDefault(in.DedicatedPod, false) +} + +// GetRootRequired returns the value of the boolean property. 
If unset, it's the default value specified in the devfile:default:value marker +func (in *Dockerfile) GetRootRequired() bool { + return getBoolOrDefault(in.RootRequired, false) +} + +// GetEphemeral returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker +func (in *Volume) GetEphemeral() bool { + return getBoolOrDefault(in.Ephemeral, false) +} + +// GetSecure returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker +func (in *Endpoint) GetSecure() bool { + return getBoolOrDefault(in.Secure, false) +} + +func getBoolOrDefault(input *bool, defaultVal bool) bool { + if input != nil { + return *input + } + return defaultVal +} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go index 933e3b4e5ff..4886db49e64 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go @@ -8,6 +8,21 @@ import ( type ParentOverrides struct { OverridesBase `json:",inline"` + // Overrides of variables encapsulated in a parent devfile. + // Overriding is done according to K8S strategic merge patch standard rules. + // +optional + // +patchStrategy=merge + Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"` + + // Overrides of attributes encapsulated in a parent devfile. + // Overriding is done according to K8S strategic merge patch standard rules. 
+ // +optional + // +patchStrategy=merge + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless + Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"` + // Overrides of components encapsulated in a parent devfile or a plugin. // Overriding is done according to K8S strategic merge patch standard rules. // +optional @@ -53,6 +68,9 @@ type ComponentParentOverride struct { // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` ComponentUnionParentOverride `json:",inline"` } @@ -66,16 +84,15 @@ type ProjectParentOverride struct { // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` // Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. // +optional ClonePath string `json:"clonePath,omitempty"` - // Populate the project sparsely with selected directories. - // +optional - SparseCheckoutDirs []string `json:"sparseCheckoutDirs,omitempty"` - ProjectSourceParentOverride `json:",inline"` } @@ -88,6 +105,9 @@ type StarterProjectParentOverride struct { // Map of implementation-dependant free-form YAML attributes. 
// +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` // Description of a starter project @@ -112,6 +132,9 @@ type CommandParentOverride struct { // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` CommandUnionParentOverride `json:",inline"` } @@ -119,25 +142,25 @@ type CommandParentOverride struct { // +union type ComponentUnionParentOverride struct { - // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Plugin + // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image;Plugin // Type of component // // +unionDiscriminator // +optional ComponentType ComponentTypeParentOverride `json:"componentType,omitempty"` - // Allows adding and configuring workspace-related containers + // Allows adding and configuring devworkspace-related containers // +optional Container *ContainerComponentParentOverride `json:"container,omitempty"` - // Allows importing into the workspace the Kubernetes resources + // Allows importing into the devworkspace the Kubernetes resources // defined in a given manifest. For example this allows reusing the Kubernetes // definitions used to deploy some runtime components in production. // // +optional Kubernetes *KubernetesComponentParentOverride `json:"kubernetes,omitempty"` - // Allows importing into the workspace the OpenShift resources + // Allows importing into the devworkspace the OpenShift resources // defined in a given manifest. For example this allows reusing the OpenShift // definitions used to deploy some runtime components in production. 
// @@ -149,6 +172,10 @@ type ComponentUnionParentOverride struct { // +optional Volume *VolumeComponentParentOverride `json:"volume,omitempty"` + // Allows specifying the definition of an image for outer loop builds + // +optional + Image *ImageComponentParentOverride `json:"image,omitempty"` + // Allows importing a plugin. // // Plugins are mainly imported devfiles that contribute components, commands @@ -163,7 +190,7 @@ type ComponentUnionParentOverride struct { // +union type ProjectSourceParentOverride struct { - // +kubebuilder:validation:Enum=Git;Github;Zip + // +kubebuilder:validation:Enum=Git;Zip // Type of project source // + // +unionDiscriminator @@ -174,10 +201,6 @@ type ProjectSourceParentOverride struct { // +optional Git *GitProjectSourceParentOverride `json:"git,omitempty"` - // Project's GitHub source - // +optional - Github *GithubProjectSourceParentOverride `json:"github,omitempty"` - // Project's Zip source // +optional Zip *ZipProjectSourceParentOverride `json:"zip,omitempty"` @@ -186,8 +209,8 @@ type ProjectSourceParentOverride struct { // +union type CommandUnionParentOverride struct { - // +kubebuilder:validation:Enum=Exec;Apply;VscodeTask;VscodeLaunch;Composite - // Type of workspace command + // +kubebuilder:validation:Enum=Exec;Apply;Composite + // Type of devworkspace command // +unionDiscriminator // +optional CommandType CommandTypeParentOverride `json:"commandType,omitempty"` @@ -197,27 +220,19 @@ type CommandUnionParentOverride struct { Exec *ExecCommandParentOverride `json:"exec,omitempty"` // Command that consists in applying a given component definition, - // typically bound to a workspace event. + // typically bound to a devworkspace event. 
// // For example, when an `apply` command is bound to a `preStart` event, // and references a `container` component, it will start the container as a - // K8S initContainer in the workspace POD, unless the component has its + // K8S initContainer in the devworkspace POD, unless the component has its // `dedicatedPod` field set to `true`. // // When no `apply` command exist for a given component, - // it is assumed the component will be applied at workspace start + // it is assumed the component will be applied at devworkspace start // by default. // +optional Apply *ApplyCommandParentOverride `json:"apply,omitempty"` - // Command providing the definition of a VsCode Task - // +optional - VscodeTask *VscodeConfigurationCommandParentOverride `json:"vscodeTask,omitempty"` - - // Command providing the definition of a VsCode launch action - // +optional - VscodeLaunch *VscodeConfigurationCommandParentOverride `json:"vscodeLaunch,omitempty"` - // Composite command that allows executing several sub-commands // either sequentially or concurrently // +optional @@ -228,29 +243,35 @@ type CommandUnionParentOverride struct { // Only one of the following component type may be specified. 
type ComponentTypeParentOverride string -// Component that allows the developer to add a configured container into his workspace +// Component that allows the developer to add a configured container into their devworkspace type ContainerComponentParentOverride struct { BaseComponentParentOverride `json:",inline"` ContainerParentOverride `json:",inline"` Endpoints []EndpointParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` } -// Component that allows partly importing Kubernetes resources into the workspace POD +// Component that allows partly importing Kubernetes resources into the devworkspace POD type KubernetesComponentParentOverride struct { K8sLikeComponentParentOverride `json:",inline"` } -// Component that allows partly importing Openshift resources into the workspace POD +// Component that allows partly importing Openshift resources into the devworkspace POD type OpenshiftComponentParentOverride struct { K8sLikeComponentParentOverride `json:",inline"` } -// Component that allows the developer to declare and configure a volume into his workspace +// Component that allows the developer to declare and configure a volume into their devworkspace type VolumeComponentParentOverride struct { BaseComponentParentOverride `json:",inline"` VolumeParentOverride `json:",inline"` } +// Component that allows the developer to build a runtime image for outerloop +type ImageComponentParentOverride struct { + BaseComponentParentOverride `json:",inline"` + ImageParentOverride `json:",inline"` +} + type PluginComponentParentOverride struct { BaseComponentParentOverride `json:",inline"` ImportReferenceParentOverride `json:",inline"` @@ -267,10 +288,6 @@ type GitProjectSourceParentOverride struct { GitLikeProjectSourceParentOverride `json:",inline"` } -type GithubProjectSourceParentOverride struct { - GitLikeProjectSourceParentOverride `json:",inline"` -} - type ZipProjectSourceParentOverride struct { CommonProjectSourceParentOverride 
`json:",inline"` @@ -323,7 +340,7 @@ type ExecCommandParentOverride struct { // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. // // Default value is `false` - HotReloadCapable bool `json:"hotReloadCapable,omitempty"` + HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` } type ApplyCommandParentOverride struct { @@ -335,11 +352,6 @@ type ApplyCommandParentOverride struct { Component string `json:"component,omitempty"` } -type VscodeConfigurationCommandParentOverride struct { - BaseCommandParentOverride `json:",inline"` - VscodeConfigurationCommandLocationParentOverride `json:",inline"` -} - type CompositeCommandParentOverride struct { LabeledCommandParentOverride `json:",inline"` @@ -348,11 +360,11 @@ type CompositeCommandParentOverride struct { // Indicates if the sub-commands should be executed concurrently // +optional - Parallel bool `json:"parallel,omitempty"` + Parallel *bool `json:"parallel,omitempty"` } -// Workspace component: Anything that will bring additional features / tooling / behaviour / context -// to the workspace, in order to make working in it easier. +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. type BaseComponentParentOverride struct { } @@ -418,7 +430,7 @@ type ContainerParentOverride struct { // // Default value is `false` // +optional - DedicatedPod bool `json:"dedicatedPod,omitempty"` + DedicatedPod *bool `json:"dedicatedPod,omitempty"` } type EndpointParentOverride struct { @@ -435,12 +447,12 @@ type EndpointParentOverride struct { // - `public` means that the endpoint will be exposed on the public network, typically through // a K8S ingress or an OpenShift route. 
// - // - `internal` means that the endpoint will be exposed internally outside of the main workspace POD, + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, // typically by K8S services, to be consumed by other elements running // on the same cloud internal network. // // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main workspace POD, on a local address. + // inside the main devworkspace POD, on a local address. // // Default value is `public` // +optional @@ -469,7 +481,7 @@ type EndpointParentOverride struct { // Describes whether the endpoint should be secured and protected by some // authentication process. This requires a protocol of `https` or `wss`. // +optional - Secure bool `json:"secure,omitempty"` + Secure *bool `json:"secure,omitempty"` // Path of the endpoint URL // +optional @@ -483,6 +495,9 @@ type EndpointParentOverride struct { // // - type: "terminal" / "ide" / "ide-dev", // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` } @@ -498,11 +513,27 @@ type VolumeParentOverride struct { // +optional // Size of the volume Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. Defaults + // to false + Ephemeral *bool `json:"ephemeral,omitempty"` +} + +type ImageParentOverride struct { + + // +optional + // Name of the image for the resulting outerloop build + ImageName string `json:"imageName,omitempty"` + ImageUnionParentOverride `json:",inline"` } type ImportReferenceParentOverride struct { ImportReferenceUnionParentOverride `json:",inline"` + // Registry URL to pull the parent devfile from when using id in the parent reference. 
+ // To ensure the parent devfile gets resolved consistently in different environments, + // it is recommended to always specify the `registryUrl` when `id` is used. // +optional RegistryUrl string `json:"registryUrl,omitempty"` } @@ -535,7 +566,8 @@ type GitLikeProjectSourceParentOverride struct { CheckoutFrom *CheckoutFromParentOverride `json:"checkoutFrom,omitempty"` // +optional - // The remotes map which should be initialized in the git project. Must have at least one remote configured + // The remotes map which should be initialized in the git project. + // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. Remotes map[string]string `json:"remotes,omitempty"` } @@ -557,33 +589,6 @@ type EnvVarParentOverride struct { Value string `json:"value,omitempty" yaml:"value"` } -type BaseCommandParentOverride struct { - - // +optional - // Defines the group this command is part of - Group *CommandGroupParentOverride `json:"group,omitempty"` -} - -// +union -type VscodeConfigurationCommandLocationParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;Inlined - // Type of Vscode configuration command location - // + - // +unionDiscriminator - // +optional - LocationType VscodeConfigurationCommandLocationTypeParentOverride `json:"locationType,omitempty"` - - // Location as an absolute of relative URI - // the VsCode configuration will be fetched from - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined content of the VsCode configuration - // +optional - Inlined string `json:"inlined,omitempty"` -} - // Volume that should be mounted to a component container type VolumeMountParentOverride struct { @@ -629,6 +634,21 @@ type K8sLikeComponentLocationParentOverride struct { Inlined string `json:"inlined,omitempty"` } +// +union +type ImageUnionParentOverride struct { + + // +kubebuilder:validation:Enum=Dockerfile + // Type of image + // + // 
+unionDiscriminator + // +optional + ImageType ImageTypeParentOverride `json:"imageType,omitempty"` + + // Allows specifying dockerfile type build + // +optional + Dockerfile *DockerfileImageParentOverride `json:"dockerfile,omitempty"` +} + // Location from where the an import reference is retrieved // +union type ImportReferenceUnionParentOverride struct { @@ -640,7 +660,8 @@ type ImportReferenceUnionParentOverride struct { // +optional ImportReferenceType ImportReferenceTypeParentOverride `json:"importReferenceType,omitempty"` - // Uri of a Devfile yaml file + // URI Reference of a parent devfile YAML file. + // It can be a full URL or a relative URI with the current devfile as the base URI. // +optional Uri string `json:"uri,omitempty"` @@ -669,6 +690,9 @@ type ComponentPluginOverrideParentOverride struct { // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` ComponentUnionPluginOverrideParentOverride `json:",inline"` } @@ -684,6 +708,9 @@ type CommandPluginOverrideParentOverride struct { // Map of implementation-dependant free-form YAML attributes. 
// +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` CommandUnionPluginOverrideParentOverride `json:",inline"` } @@ -700,27 +727,29 @@ type CheckoutFromParentOverride struct { Remote string `json:"remote,omitempty"` } -type CommandGroupParentOverride struct { - - // +optional - // Kind of group the command is part of - Kind CommandGroupKindParentOverride `json:"kind,omitempty"` +type BaseCommandParentOverride struct { // +optional - // Identifies the default command for a given group kind - IsDefault bool `json:"isDefault,omitempty"` + // Defines the group this command is part of + Group *CommandGroupParentOverride `json:"group,omitempty"` } -// VscodeConfigurationCommandLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type VscodeConfigurationCommandLocationTypeParentOverride string - // K8sLikeComponentLocationType describes the type of // the location the configuration is fetched from. // Only one of the following component type may be specified. type K8sLikeComponentLocationTypeParentOverride string +// ImageType describes the type of image. +// Only one of the following image type may be specified. +type ImageTypeParentOverride string + +// Dockerfile Image type to specify the outerloop build using a Dockerfile +type DockerfileImageParentOverride struct { + BaseImageParentOverride `json:",inline"` + DockerfileSrcParentOverride `json:",inline"` + DockerfileParentOverride `json:",inline"` +} + // ImportReferenceType describes the type of location // from where the referenced template structure should be retrieved. // Only one of the following parent locations may be specified. 
@@ -737,25 +766,25 @@ type KubernetesCustomResourceImportReferenceParentOverride struct { // +union type ComponentUnionPluginOverrideParentOverride struct { - // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume + // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image // Type of component // // +unionDiscriminator // +optional ComponentType ComponentTypePluginOverrideParentOverride `json:"componentType,omitempty"` - // Allows adding and configuring workspace-related containers + // Allows adding and configuring devworkspace-related containers // +optional Container *ContainerComponentPluginOverrideParentOverride `json:"container,omitempty"` - // Allows importing into the workspace the Kubernetes resources + // Allows importing into the devworkspace the Kubernetes resources // defined in a given manifest. For example this allows reusing the Kubernetes // definitions used to deploy some runtime components in production. // // +optional Kubernetes *KubernetesComponentPluginOverrideParentOverride `json:"kubernetes,omitempty"` - // Allows importing into the workspace the OpenShift resources + // Allows importing into the devworkspace the OpenShift resources // defined in a given manifest. For example this allows reusing the OpenShift // definitions used to deploy some runtime components in production. 
// @@ -766,13 +795,17 @@ type ComponentUnionPluginOverrideParentOverride struct { // shared by several other components // +optional Volume *VolumeComponentPluginOverrideParentOverride `json:"volume,omitempty"` + + // Allows specifying the definition of an image for outer loop builds + // +optional + Image *ImageComponentPluginOverrideParentOverride `json:"image,omitempty"` } // +union type CommandUnionPluginOverrideParentOverride struct { - // +kubebuilder:validation:Enum=Exec;Apply;VscodeTask;VscodeLaunch;Composite - // Type of workspace command + // +kubebuilder:validation:Enum=Exec;Apply;Composite + // Type of devworkspace command // +unionDiscriminator // +optional CommandType CommandTypePluginOverrideParentOverride `json:"commandType,omitempty"` @@ -782,64 +815,113 @@ type CommandUnionPluginOverrideParentOverride struct { Exec *ExecCommandPluginOverrideParentOverride `json:"exec,omitempty"` // Command that consists in applying a given component definition, - // typically bound to a workspace event. + // typically bound to a devworkspace event. // // For example, when an `apply` command is bound to a `preStart` event, // and references a `container` component, it will start the container as a - // K8S initContainer in the workspace POD, unless the component has its + // K8S initContainer in the devworkspace POD, unless the component has its // `dedicatedPod` field set to `true`. // // When no `apply` command exist for a given component, - // it is assumed the component will be applied at workspace start + // it is assumed the component will be applied at devworkspace start // by default. 
// +optional Apply *ApplyCommandPluginOverrideParentOverride `json:"apply,omitempty"` - // Command providing the definition of a VsCode Task + // Composite command that allows executing several sub-commands + // either sequentially or concurrently // +optional - VscodeTask *VscodeConfigurationCommandPluginOverrideParentOverride `json:"vscodeTask,omitempty"` + Composite *CompositeCommandPluginOverrideParentOverride `json:"composite,omitempty"` +} + +type CommandGroupParentOverride struct { + + // +optional + // Kind of group the command is part of + Kind CommandGroupKindParentOverride `json:"kind,omitempty"` - // Command providing the definition of a VsCode launch action // +optional - VscodeLaunch *VscodeConfigurationCommandPluginOverrideParentOverride `json:"vscodeLaunch,omitempty"` + // Identifies the default command for a given group kind + IsDefault *bool `json:"isDefault,omitempty"` +} - // Composite command that allows executing several sub-commands - // either sequentially or concurrently +type BaseImageParentOverride struct { +} + +// +union +type DockerfileSrcParentOverride struct { + + // +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git + // Type of Dockerfile src + // + + // +unionDiscriminator // +optional - Composite *CompositeCommandPluginOverrideParentOverride `json:"composite,omitempty"` + SrcType DockerfileSrcTypeParentOverride `json:"srcType,omitempty"` + + // URI Reference of a Dockerfile. + // It can be a full URL or a relative URI from the current devfile as the base URI. + // +optional + Uri string `json:"uri,omitempty"` + + // Dockerfile's Devfile Registry source + // +optional + DevfileRegistry *DockerfileDevfileRegistrySourceParentOverride `json:"devfileRegistry,omitempty"` + + // Dockerfile's Git source + // +optional + Git *DockerfileGitProjectSourceParentOverride `json:"git,omitempty"` } -// CommandGroupKind describes the kind of command group. 
-// +kubebuilder:validation:Enum=build;run;test;debug -type CommandGroupKindParentOverride string +type DockerfileParentOverride struct { + + // Path of source directory to establish build context. Defaults to ${PROJECT_ROOT} in the container + // +optional + BuildContext string `json:"buildContext,omitempty"` + + // The arguments to supply to the dockerfile build. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Specify if a privileged builder pod is required. + // + // Default value is `false` + // +optional + RootRequired *bool `json:"rootRequired,omitempty"` +} // ComponentType describes the type of component. // Only one of the following component type may be specified. type ComponentTypePluginOverrideParentOverride string -// Component that allows the developer to add a configured container into his workspace +// Component that allows the developer to add a configured container into their devworkspace type ContainerComponentPluginOverrideParentOverride struct { BaseComponentPluginOverrideParentOverride `json:",inline"` ContainerPluginOverrideParentOverride `json:",inline"` Endpoints []EndpointPluginOverrideParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` } -// Component that allows partly importing Kubernetes resources into the workspace POD +// Component that allows partly importing Kubernetes resources into the devworkspace POD type KubernetesComponentPluginOverrideParentOverride struct { K8sLikeComponentPluginOverrideParentOverride `json:",inline"` } -// Component that allows partly importing Openshift resources into the workspace POD +// Component that allows partly importing Openshift resources into the devworkspace POD type OpenshiftComponentPluginOverrideParentOverride struct { K8sLikeComponentPluginOverrideParentOverride `json:",inline"` } -// Component that allows the developer to declare and configure a volume into his workspace +// Component that allows the developer to 
declare and configure a volume into their devworkspace type VolumeComponentPluginOverrideParentOverride struct { BaseComponentPluginOverrideParentOverride `json:",inline"` VolumePluginOverrideParentOverride `json:",inline"` } +// Component that allows the developer to build a runtime image for outerloop +type ImageComponentPluginOverrideParentOverride struct { + BaseComponentPluginOverrideParentOverride `json:",inline"` + ImagePluginOverrideParentOverride `json:",inline"` +} + // CommandType describes the type of command. // Only one of the following command type may be specified. type CommandTypePluginOverrideParentOverride string @@ -884,7 +966,7 @@ type ExecCommandPluginOverrideParentOverride struct { // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. // // Default value is `false` - HotReloadCapable bool `json:"hotReloadCapable,omitempty"` + HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` } type ApplyCommandPluginOverrideParentOverride struct { @@ -896,11 +978,6 @@ type ApplyCommandPluginOverrideParentOverride struct { Component string `json:"component,omitempty"` } -type VscodeConfigurationCommandPluginOverrideParentOverride struct { - BaseCommandPluginOverrideParentOverride `json:",inline"` - VscodeConfigurationCommandLocationPluginOverrideParentOverride `json:",inline"` -} - type CompositeCommandPluginOverrideParentOverride struct { LabeledCommandPluginOverrideParentOverride `json:",inline"` @@ -909,11 +986,46 @@ type CompositeCommandPluginOverrideParentOverride struct { // Indicates if the sub-commands should be executed concurrently // +optional - Parallel bool `json:"parallel,omitempty"` + Parallel *bool `json:"parallel,omitempty"` +} + +// CommandGroupKind describes the kind of command group. +// +kubebuilder:validation:Enum=build;run;test;debug;deploy +type CommandGroupKindParentOverride string + +// DockerfileSrcType describes the type of +// the src for the Dockerfile outerloop build. 
+// Only one of the following location type may be specified. +type DockerfileSrcTypeParentOverride string + +type DockerfileDevfileRegistrySourceParentOverride struct { + + // +optional + // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry + // required for the Dockerfile build will be downloaded for building the image. + Id string `json:"id,omitempty"` + + // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. + // To ensure the Dockerfile gets resolved consistently in different environments, + // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. + // +optional + RegistryUrl string `json:"registryUrl,omitempty"` } -// Workspace component: Anything that will bring additional features / tooling / behaviour / context -// to the workspace, in order to make working in it easier. +type DockerfileGitProjectSourceParentOverride struct { + + // Git src for the Dockerfile build. The src required for the Dockerfile build will need to be + // cloned for building the image. + GitProjectSourceParentOverride `json:",inline"` + + // Location of the Dockerfile in the Git repository when using git as Dockerfile src. + // Defaults to Dockerfile. + // +optional + FileLocation string `json:"fileLocation,omitempty"` +} + +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. 
type BaseComponentPluginOverrideParentOverride struct { } @@ -980,7 +1092,7 @@ type ContainerPluginOverrideParentOverride struct { // // Default value is `false` // +optional - DedicatedPod bool `json:"dedicatedPod,omitempty"` + DedicatedPod *bool `json:"dedicatedPod,omitempty"` } type EndpointPluginOverrideParentOverride struct { @@ -997,12 +1109,12 @@ type EndpointPluginOverrideParentOverride struct { // - `public` means that the endpoint will be exposed on the public network, typically through // a K8S ingress or an OpenShift route. // - // - `internal` means that the endpoint will be exposed internally outside of the main workspace POD, + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, // typically by K8S services, to be consumed by other elements running // on the same cloud internal network. // // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main workspace POD, on a local address. + // inside the main devworkspace POD, on a local address. // // Default value is `public` // +optional @@ -1031,7 +1143,7 @@ type EndpointPluginOverrideParentOverride struct { // Describes whether the endpoint should be secured and protected by some // authentication process. This requires a protocol of `https` or `wss`. 
// +optional - Secure bool `json:"secure,omitempty"` + Secure *bool `json:"secure,omitempty"` // Path of the endpoint URL // +optional @@ -1045,6 +1157,9 @@ type EndpointPluginOverrideParentOverride struct { // // - type: "terminal" / "ide" / "ide-dev", // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` } @@ -1060,6 +1175,19 @@ type VolumePluginOverrideParentOverride struct { // +optional // Size of the volume Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. Defaults + // to false + Ephemeral *bool `json:"ephemeral,omitempty"` +} + +type ImagePluginOverrideParentOverride struct { + + // +optional + // Name of the image for the resulting outerloop build + ImageName string `json:"imageName,omitempty"` + ImageUnionPluginOverrideParentOverride `json:",inline"` } type LabeledCommandPluginOverrideParentOverride struct { @@ -1078,33 +1206,6 @@ type EnvVarPluginOverrideParentOverride struct { Value string `json:"value,omitempty" yaml:"value"` } -type BaseCommandPluginOverrideParentOverride struct { - - // +optional - // Defines the group this command is part of - Group *CommandGroupPluginOverrideParentOverride `json:"group,omitempty"` -} - -// +union -type VscodeConfigurationCommandLocationPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;Inlined - // Type of Vscode configuration command location - // + - // +unionDiscriminator - // +optional - LocationType VscodeConfigurationCommandLocationTypePluginOverrideParentOverride `json:"locationType,omitempty"` - - // Location as an absolute of relative URI - // the VsCode configuration will be fetched from - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined content of the VsCode configuration - // +optional - Inlined string `json:"inlined,omitempty"` -} - // Volume 
that should be mounted to a component container type VolumeMountPluginOverrideParentOverride struct { @@ -1150,6 +1251,44 @@ type K8sLikeComponentLocationPluginOverrideParentOverride struct { Inlined string `json:"inlined,omitempty"` } +// +union +type ImageUnionPluginOverrideParentOverride struct { + + // +kubebuilder:validation:Enum=Dockerfile + // Type of image + // + // +unionDiscriminator + // +optional + ImageType ImageTypePluginOverrideParentOverride `json:"imageType,omitempty"` + + // Allows specifying dockerfile type build + // +optional + Dockerfile *DockerfileImagePluginOverrideParentOverride `json:"dockerfile,omitempty"` +} + +type BaseCommandPluginOverrideParentOverride struct { + + // +optional + // Defines the group this command is part of + Group *CommandGroupPluginOverrideParentOverride `json:"group,omitempty"` +} + +// K8sLikeComponentLocationType describes the type of +// the location the configuration is fetched from. +// Only one of the following component type may be specified. +type K8sLikeComponentLocationTypePluginOverrideParentOverride string + +// ImageType describes the type of image. +// Only one of the following image type may be specified. +type ImageTypePluginOverrideParentOverride string + +// Dockerfile Image type to specify the outerloop build using a Dockerfile +type DockerfileImagePluginOverrideParentOverride struct { + BaseImagePluginOverrideParentOverride `json:",inline"` + DockerfileSrcPluginOverrideParentOverride `json:",inline"` + DockerfilePluginOverrideParentOverride `json:",inline"` +} + type CommandGroupPluginOverrideParentOverride struct { // +optional @@ -1158,21 +1297,118 @@ type CommandGroupPluginOverrideParentOverride struct { // +optional // Identifies the default command for a given group kind - IsDefault bool `json:"isDefault,omitempty"` + IsDefault *bool `json:"isDefault,omitempty"` } -// VscodeConfigurationCommandLocationType describes the type of -// the location the configuration is fetched from. 
-// Only one of the following component type may be specified. -type VscodeConfigurationCommandLocationTypePluginOverrideParentOverride string +type BaseImagePluginOverrideParentOverride struct { +} -// K8sLikeComponentLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type K8sLikeComponentLocationTypePluginOverrideParentOverride string +// +union +type DockerfileSrcPluginOverrideParentOverride struct { + + // +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git + // Type of Dockerfile src + // + + // +unionDiscriminator + // +optional + SrcType DockerfileSrcTypePluginOverrideParentOverride `json:"srcType,omitempty"` + + // URI Reference of a Dockerfile. + // It can be a full URL or a relative URI from the current devfile as the base URI. + // +optional + Uri string `json:"uri,omitempty"` + + // Dockerfile's Devfile Registry source + // +optional + DevfileRegistry *DockerfileDevfileRegistrySourcePluginOverrideParentOverride `json:"devfileRegistry,omitempty"` + + // Dockerfile's Git source + // +optional + Git *DockerfileGitProjectSourcePluginOverrideParentOverride `json:"git,omitempty"` +} + +type DockerfilePluginOverrideParentOverride struct { + + // Path of source directory to establish build context. Defaults to ${PROJECT_ROOT} in the container + // +optional + BuildContext string `json:"buildContext,omitempty"` + + // The arguments to supply to the dockerfile build. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Specify if a privileged builder pod is required. + // + // Default value is `false` + // +optional + RootRequired *bool `json:"rootRequired,omitempty"` +} // CommandGroupKind describes the kind of command group. 
-// +kubebuilder:validation:Enum=build;run;test;debug +// +kubebuilder:validation:Enum=build;run;test;debug;deploy type CommandGroupKindPluginOverrideParentOverride string +// DockerfileSrcType describes the type of +// the src for the Dockerfile outerloop build. +// Only one of the following location type may be specified. +type DockerfileSrcTypePluginOverrideParentOverride string + +type DockerfileDevfileRegistrySourcePluginOverrideParentOverride struct { + + // +optional + // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry + // required for the Dockerfile build will be downloaded for building the image. + Id string `json:"id,omitempty"` + + // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. + // To ensure the Dockerfile gets resolved consistently in different environments, + // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. + // +optional + RegistryUrl string `json:"registryUrl,omitempty"` +} + +type DockerfileGitProjectSourcePluginOverrideParentOverride struct { + + // Git src for the Dockerfile build. The src required for the Dockerfile build will need to be + // cloned for building the image. + GitProjectSourcePluginOverrideParentOverride `json:",inline"` + + // Location of the Dockerfile in the Git repository when using git as Dockerfile src. + // Defaults to Dockerfile. + // +optional + FileLocation string `json:"fileLocation,omitempty"` +} + +type GitProjectSourcePluginOverrideParentOverride struct { + GitLikeProjectSourcePluginOverrideParentOverride `json:",inline"` +} + +type GitLikeProjectSourcePluginOverrideParentOverride struct { + CommonProjectSourcePluginOverrideParentOverride `json:",inline"` + + // Defines from what the project should be checked out. 
Required if there are more than one remote configured + // +optional + CheckoutFrom *CheckoutFromPluginOverrideParentOverride `json:"checkoutFrom,omitempty"` + + // +optional + // The remotes map which should be initialized in the git project. + // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. + Remotes map[string]string `json:"remotes,omitempty"` +} + +type CommonProjectSourcePluginOverrideParentOverride struct { +} + +type CheckoutFromPluginOverrideParentOverride struct { + + // The revision to checkout from. Should be branch name, tag or commit id. + // Default branch is used if missing or specified revision is not found. + // +optional + Revision string `json:"revision,omitempty"` + + // The remote name should be used as init. Required if there are more than one remote configured + // +optional + Remote string `json:"remote,omitempty"` +} + func (overrides ParentOverrides) isOverride() {} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go index 42655fe8860..93ccf8102ff 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go @@ -37,6 +37,9 @@ type ComponentPluginOverride struct { // Map of implementation-dependant free-form YAML attributes. // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` ComponentUnionPluginOverride `json:",inline"` } @@ -52,6 +55,9 @@ type CommandPluginOverride struct { // Map of implementation-dependant free-form YAML attributes. 
// +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` CommandUnionPluginOverride `json:",inline"` } @@ -59,25 +65,25 @@ type CommandPluginOverride struct { // +union type ComponentUnionPluginOverride struct { - // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume + // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image // Type of component // // +unionDiscriminator // +optional ComponentType ComponentTypePluginOverride `json:"componentType,omitempty"` - // Allows adding and configuring workspace-related containers + // Allows adding and configuring devworkspace-related containers // +optional Container *ContainerComponentPluginOverride `json:"container,omitempty"` - // Allows importing into the workspace the Kubernetes resources + // Allows importing into the devworkspace the Kubernetes resources // defined in a given manifest. For example this allows reusing the Kubernetes // definitions used to deploy some runtime components in production. // // +optional Kubernetes *KubernetesComponentPluginOverride `json:"kubernetes,omitempty"` - // Allows importing into the workspace the OpenShift resources + // Allows importing into the devworkspace the OpenShift resources // defined in a given manifest. For example this allows reusing the OpenShift // definitions used to deploy some runtime components in production. 
// @@ -88,13 +94,17 @@ type ComponentUnionPluginOverride struct { // shared by several other components // +optional Volume *VolumeComponentPluginOverride `json:"volume,omitempty"` + + // Allows specifying the definition of an image for outer loop builds + // +optional + Image *ImageComponentPluginOverride `json:"image,omitempty"` } // +union type CommandUnionPluginOverride struct { - // +kubebuilder:validation:Enum=Exec;Apply;VscodeTask;VscodeLaunch;Composite - // Type of workspace command + // +kubebuilder:validation:Enum=Exec;Apply;Composite + // Type of devworkspace command // +unionDiscriminator // +optional CommandType CommandTypePluginOverride `json:"commandType,omitempty"` @@ -104,27 +114,19 @@ type CommandUnionPluginOverride struct { Exec *ExecCommandPluginOverride `json:"exec,omitempty"` // Command that consists in applying a given component definition, - // typically bound to a workspace event. + // typically bound to a devworkspace event. // // For example, when an `apply` command is bound to a `preStart` event, // and references a `container` component, it will start the container as a - // K8S initContainer in the workspace POD, unless the component has its + // K8S initContainer in the devworkspace POD, unless the component has its // `dedicatedPod` field set to `true`. // // When no `apply` command exist for a given component, - // it is assumed the component will be applied at workspace start + // it is assumed the component will be applied at devworkspace start // by default. 
// +optional Apply *ApplyCommandPluginOverride `json:"apply,omitempty"` - // Command providing the definition of a VsCode Task - // +optional - VscodeTask *VscodeConfigurationCommandPluginOverride `json:"vscodeTask,omitempty"` - - // Command providing the definition of a VsCode launch action - // +optional - VscodeLaunch *VscodeConfigurationCommandPluginOverride `json:"vscodeLaunch,omitempty"` - // Composite command that allows executing several sub-commands // either sequentially or concurrently // +optional @@ -135,29 +137,35 @@ type CommandUnionPluginOverride struct { // Only one of the following component type may be specified. type ComponentTypePluginOverride string -// Component that allows the developer to add a configured container into his workspace +// Component that allows the developer to add a configured container into their devworkspace type ContainerComponentPluginOverride struct { BaseComponentPluginOverride `json:",inline"` ContainerPluginOverride `json:",inline"` Endpoints []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` } -// Component that allows partly importing Kubernetes resources into the workspace POD +// Component that allows partly importing Kubernetes resources into the devworkspace POD type KubernetesComponentPluginOverride struct { K8sLikeComponentPluginOverride `json:",inline"` } -// Component that allows partly importing Openshift resources into the workspace POD +// Component that allows partly importing Openshift resources into the devworkspace POD type OpenshiftComponentPluginOverride struct { K8sLikeComponentPluginOverride `json:",inline"` } -// Component that allows the developer to declare and configure a volume into his workspace +// Component that allows the developer to declare and configure a volume into their devworkspace type VolumeComponentPluginOverride struct { BaseComponentPluginOverride `json:",inline"` VolumePluginOverride `json:",inline"` } +// Component that allows 
the developer to build a runtime image for outerloop +type ImageComponentPluginOverride struct { + BaseComponentPluginOverride `json:",inline"` + ImagePluginOverride `json:",inline"` +} + // CommandType describes the type of command. // Only one of the following command type may be specified. type CommandTypePluginOverride string @@ -202,7 +210,7 @@ type ExecCommandPluginOverride struct { // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. // // Default value is `false` - HotReloadCapable bool `json:"hotReloadCapable,omitempty"` + HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` } type ApplyCommandPluginOverride struct { @@ -214,11 +222,6 @@ type ApplyCommandPluginOverride struct { Component string `json:"component,omitempty"` } -type VscodeConfigurationCommandPluginOverride struct { - BaseCommandPluginOverride `json:",inline"` - VscodeConfigurationCommandLocationPluginOverride `json:",inline"` -} - type CompositeCommandPluginOverride struct { LabeledCommandPluginOverride `json:",inline"` @@ -227,11 +230,11 @@ type CompositeCommandPluginOverride struct { // Indicates if the sub-commands should be executed concurrently // +optional - Parallel bool `json:"parallel,omitempty"` + Parallel *bool `json:"parallel,omitempty"` } -// Workspace component: Anything that will bring additional features / tooling / behaviour / context -// to the workspace, in order to make working in it easier. +// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context +// to the devworkspace, in order to make working in it easier. 
type BaseComponentPluginOverride struct { } @@ -297,7 +300,7 @@ type ContainerPluginOverride struct { // // Default value is `false` // +optional - DedicatedPod bool `json:"dedicatedPod,omitempty"` + DedicatedPod *bool `json:"dedicatedPod,omitempty"` } type EndpointPluginOverride struct { @@ -314,12 +317,12 @@ type EndpointPluginOverride struct { // - `public` means that the endpoint will be exposed on the public network, typically through // a K8S ingress or an OpenShift route. // - // - `internal` means that the endpoint will be exposed internally outside of the main workspace POD, + // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, // typically by K8S services, to be consumed by other elements running // on the same cloud internal network. // // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main workspace POD, on a local address. + // inside the main devworkspace POD, on a local address. // // Default value is `public` // +optional @@ -348,7 +351,7 @@ type EndpointPluginOverride struct { // Describes whether the endpoint should be secured and protected by some // authentication process. This requires a protocol of `https` or `wss`. // +optional - Secure bool `json:"secure,omitempty"` + Secure *bool `json:"secure,omitempty"` // Path of the endpoint URL // +optional @@ -362,6 +365,9 @@ type EndpointPluginOverride struct { // // - type: "terminal" / "ide" / "ide-dev", // +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` } @@ -377,6 +383,19 @@ type VolumePluginOverride struct { // +optional // Size of the volume Size string `json:"size,omitempty"` + + // +optional + // Ephemeral volumes are not stored persistently across restarts. 
Defaults + // to false + Ephemeral *bool `json:"ephemeral,omitempty"` +} + +type ImagePluginOverride struct { + + // +optional + // Name of the image for the resulting outerloop build + ImageName string `json:"imageName,omitempty"` + ImageUnionPluginOverride `json:",inline"` } type LabeledCommandPluginOverride struct { @@ -394,33 +413,6 @@ type EnvVarPluginOverride struct { Value string `json:"value,omitempty" yaml:"value"` } -type BaseCommandPluginOverride struct { - - // +optional - // Defines the group this command is part of - Group *CommandGroupPluginOverride `json:"group,omitempty"` -} - -// +union -type VscodeConfigurationCommandLocationPluginOverride struct { - - // +kubebuilder:validation:Enum=Uri;Inlined - // Type of Vscode configuration command location - // + - // +unionDiscriminator - // +optional - LocationType VscodeConfigurationCommandLocationTypePluginOverride `json:"locationType,omitempty"` - - // Location as an absolute of relative URI - // the VsCode configuration will be fetched from - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined content of the VsCode configuration - // +optional - Inlined string `json:"inlined,omitempty"` -} - // Volume that should be mounted to a component container type VolumeMountPluginOverride struct { @@ -466,6 +458,44 @@ type K8sLikeComponentLocationPluginOverride struct { Inlined string `json:"inlined,omitempty"` } +// +union +type ImageUnionPluginOverride struct { + + // +kubebuilder:validation:Enum=Dockerfile + // Type of image + // + // +unionDiscriminator + // +optional + ImageType ImageTypePluginOverride `json:"imageType,omitempty"` + + // Allows specifying dockerfile type build + // +optional + Dockerfile *DockerfileImagePluginOverride `json:"dockerfile,omitempty"` +} + +type BaseCommandPluginOverride struct { + + // +optional + // Defines the group this command is part of + Group *CommandGroupPluginOverride `json:"group,omitempty"` +} + +// K8sLikeComponentLocationType describes the type of +// 
the location the configuration is fetched from. +// Only one of the following component type may be specified. +type K8sLikeComponentLocationTypePluginOverride string + +// ImageType describes the type of image. +// Only one of the following image type may be specified. +type ImageTypePluginOverride string + +// Dockerfile Image type to specify the outerloop build using a Dockerfile +type DockerfileImagePluginOverride struct { + BaseImagePluginOverride `json:",inline"` + DockerfileSrcPluginOverride `json:",inline"` + DockerfilePluginOverride `json:",inline"` +} + type CommandGroupPluginOverride struct { // +optional @@ -474,21 +504,118 @@ type CommandGroupPluginOverride struct { // +optional // Identifies the default command for a given group kind - IsDefault bool `json:"isDefault,omitempty"` + IsDefault *bool `json:"isDefault,omitempty"` } -// VscodeConfigurationCommandLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type VscodeConfigurationCommandLocationTypePluginOverride string +type BaseImagePluginOverride struct { +} -// K8sLikeComponentLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type K8sLikeComponentLocationTypePluginOverride string +// +union +type DockerfileSrcPluginOverride struct { + + // +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git + // Type of Dockerfile src + // + + // +unionDiscriminator + // +optional + SrcType DockerfileSrcTypePluginOverride `json:"srcType,omitempty"` + + // URI Reference of a Dockerfile. + // It can be a full URL or a relative URI from the current devfile as the base URI. 
+ // +optional + Uri string `json:"uri,omitempty"` + + // Dockerfile's Devfile Registry source + // +optional + DevfileRegistry *DockerfileDevfileRegistrySourcePluginOverride `json:"devfileRegistry,omitempty"` + + // Dockerfile's Git source + // +optional + Git *DockerfileGitProjectSourcePluginOverride `json:"git,omitempty"` +} + +type DockerfilePluginOverride struct { + + // Path of source directory to establish build context. Defaults to ${PROJECT_ROOT} in the container + // +optional + BuildContext string `json:"buildContext,omitempty"` + + // The arguments to supply to the dockerfile build. + // +optional + Args []string `json:"args,omitempty" patchStrategy:"replace"` + + // Specify if a privileged builder pod is required. + // + // Default value is `false` + // +optional + RootRequired *bool `json:"rootRequired,omitempty"` +} // CommandGroupKind describes the kind of command group. -// +kubebuilder:validation:Enum=build;run;test;debug +// +kubebuilder:validation:Enum=build;run;test;debug;deploy type CommandGroupKindPluginOverride string +// DockerfileSrcType describes the type of +// the src for the Dockerfile outerloop build. +// Only one of the following location type may be specified. +type DockerfileSrcTypePluginOverride string + +type DockerfileDevfileRegistrySourcePluginOverride struct { + + // +optional + // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry + // required for the Dockerfile build will be downloaded for building the image. + Id string `json:"id,omitempty"` + + // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. + // To ensure the Dockerfile gets resolved consistently in different environments, + // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. + // +optional + RegistryUrl string `json:"registryUrl,omitempty"` +} + +type DockerfileGitProjectSourcePluginOverride struct { + + // Git src for the Dockerfile build. 
The src required for the Dockerfile build will need to be + // cloned for building the image. + GitProjectSourcePluginOverride `json:",inline"` + + // Location of the Dockerfile in the Git repository when using git as Dockerfile src. + // Defaults to Dockerfile. + // +optional + FileLocation string `json:"fileLocation,omitempty"` +} + +type GitProjectSourcePluginOverride struct { + GitLikeProjectSourcePluginOverride `json:",inline"` +} + +type GitLikeProjectSourcePluginOverride struct { + CommonProjectSourcePluginOverride `json:",inline"` + + // Defines from what the project should be checked out. Required if there are more than one remote configured + // +optional + CheckoutFrom *CheckoutFromPluginOverride `json:"checkoutFrom,omitempty"` + + // +optional + // The remotes map which should be initialized in the git project. + // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. + Remotes map[string]string `json:"remotes,omitempty"` +} + +type CommonProjectSourcePluginOverride struct { +} + +type CheckoutFromPluginOverride struct { + + // The revision to checkout from. Should be branch name, tag or commit id. + // Default branch is used if missing or specified revision is not found. + // +optional + Revision string `json:"revision,omitempty"` + + // The remote name should be used as init. 
Required if there are more than one remote configured + // +optional + Remote string `json:"remote,omitempty"` +} + func (overrides PluginOverrides) isOverride() {} diff --git a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go index 406ca523f67..3efd841aebc 100644 --- a/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go +++ b/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go @@ -21,31 +21,71 @@ func (union *CommandUnion) Simplify() { // +k8s:deepcopy-gen=false type CommandUnionVisitor struct { - Exec func(*ExecCommand) error - Apply func(*ApplyCommand) error - VscodeTask func(*VscodeConfigurationCommand) error - VscodeLaunch func(*VscodeConfigurationCommand) error - Composite func(*CompositeCommand) error - Custom func(*CustomCommand) error + Exec func(*ExecCommand) error + Apply func(*ApplyCommand) error + Composite func(*CompositeCommand) error + Custom func(*CustomCommand) error } -var vscodeConfigurationCommandLocation reflect.Type = reflect.TypeOf(VscodeConfigurationCommandLocationVisitor{}) +var imageUnion reflect.Type = reflect.TypeOf(ImageUnionVisitor{}) -func (union VscodeConfigurationCommandLocation) Visit(visitor VscodeConfigurationCommandLocationVisitor) error { +func (union ImageUnion) Visit(visitor ImageUnionVisitor) error { return visitUnion(union, visitor) } -func (union *VscodeConfigurationCommandLocation) discriminator() *string { +func (union *ImageUnion) discriminator() *string { + return (*string)(&union.ImageType) +} +func (union *ImageUnion) Normalize() error { + return normalizeUnion(union, imageUnion) +} +func (union *ImageUnion) Simplify() { + simplifyUnion(union, imageUnion) +} + +// +k8s:deepcopy-gen=false +type ImageUnionVisitor struct { + Dockerfile func(*DockerfileImage) error +} + +var dockerfileSrc 
reflect.Type = reflect.TypeOf(DockerfileSrcVisitor{}) + +func (union DockerfileSrc) Visit(visitor DockerfileSrcVisitor) error { + return visitUnion(union, visitor) +} +func (union *DockerfileSrc) discriminator() *string { + return (*string)(&union.SrcType) +} +func (union *DockerfileSrc) Normalize() error { + return normalizeUnion(union, dockerfileSrc) +} +func (union *DockerfileSrc) Simplify() { + simplifyUnion(union, dockerfileSrc) +} + +// +k8s:deepcopy-gen=false +type DockerfileSrcVisitor struct { + Uri func(string) error + DevfileRegistry func(*DockerfileDevfileRegistrySource) error + Git func(*DockerfileGitProjectSource) error +} + +var k8sLikeComponentLocation reflect.Type = reflect.TypeOf(K8sLikeComponentLocationVisitor{}) + +func (union K8sLikeComponentLocation) Visit(visitor K8sLikeComponentLocationVisitor) error { + return visitUnion(union, visitor) +} +func (union *K8sLikeComponentLocation) discriminator() *string { return (*string)(&union.LocationType) } -func (union *VscodeConfigurationCommandLocation) Normalize() error { - return normalizeUnion(union, vscodeConfigurationCommandLocation) +func (union *K8sLikeComponentLocation) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocation) } -func (union *VscodeConfigurationCommandLocation) Simplify() { - simplifyUnion(union, vscodeConfigurationCommandLocation) +func (union *K8sLikeComponentLocation) Simplify() { + simplifyUnion(union, k8sLikeComponentLocation) } // +k8s:deepcopy-gen=false -type VscodeConfigurationCommandLocationVisitor struct { +type K8sLikeComponentLocationVisitor struct { Uri func(string) error Inlined func(string) error } @@ -71,6 +111,7 @@ type ComponentUnionVisitor struct { Kubernetes func(*KubernetesComponent) error Openshift func(*OpenshiftComponent) error Volume func(*VolumeComponent) error + Image func(*ImageComponent) error Plugin func(*PluginComponent) error Custom func(*CustomComponent) error } @@ -97,27 +138,6 @@ type ImportReferenceUnionVisitor struct { 
Kubernetes func(*KubernetesCustomResourceImportReference) error } -var k8sLikeComponentLocation reflect.Type = reflect.TypeOf(K8sLikeComponentLocationVisitor{}) - -func (union K8sLikeComponentLocation) Visit(visitor K8sLikeComponentLocationVisitor) error { - return visitUnion(union, visitor) -} -func (union *K8sLikeComponentLocation) discriminator() *string { - return (*string)(&union.LocationType) -} -func (union *K8sLikeComponentLocation) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocation) -} -func (union *K8sLikeComponentLocation) Simplify() { - simplifyUnion(union, k8sLikeComponentLocation) -} - -// +k8s:deepcopy-gen=false -type K8sLikeComponentLocationVisitor struct { - Uri func(string) error - Inlined func(string) error -} - var projectSource reflect.Type = reflect.TypeOf(ProjectSourceVisitor{}) func (union ProjectSource) Visit(visitor ProjectSourceVisitor) error { @@ -136,7 +156,6 @@ func (union *ProjectSource) Simplify() { // +k8s:deepcopy-gen=false type ProjectSourceVisitor struct { Git func(*GitProjectSource) error - Github func(*GithubProjectSource) error Zip func(*ZipProjectSource) error Custom func(*CustomProjectSource) error } @@ -162,6 +181,7 @@ type ComponentUnionParentOverrideVisitor struct { Kubernetes func(*KubernetesComponentParentOverride) error Openshift func(*OpenshiftComponentParentOverride) error Volume func(*VolumeComponentParentOverride) error + Image func(*ImageComponentParentOverride) error Plugin func(*PluginComponentParentOverride) error } @@ -182,9 +202,8 @@ func (union *ProjectSourceParentOverride) Simplify() { // +k8s:deepcopy-gen=false type ProjectSourceParentOverrideVisitor struct { - Git func(*GitProjectSourceParentOverride) error - Github func(*GithubProjectSourceParentOverride) error - Zip func(*ZipProjectSourceParentOverride) error + Git func(*GitProjectSourceParentOverride) error + Zip func(*ZipProjectSourceParentOverride) error } var commandUnionParentOverride reflect.Type = 
reflect.TypeOf(CommandUnionParentOverrideVisitor{}) @@ -204,53 +223,50 @@ func (union *CommandUnionParentOverride) Simplify() { // +k8s:deepcopy-gen=false type CommandUnionParentOverrideVisitor struct { - Exec func(*ExecCommandParentOverride) error - Apply func(*ApplyCommandParentOverride) error - VscodeTask func(*VscodeConfigurationCommandParentOverride) error - VscodeLaunch func(*VscodeConfigurationCommandParentOverride) error - Composite func(*CompositeCommandParentOverride) error + Exec func(*ExecCommandParentOverride) error + Apply func(*ApplyCommandParentOverride) error + Composite func(*CompositeCommandParentOverride) error } -var vscodeConfigurationCommandLocationParentOverride reflect.Type = reflect.TypeOf(VscodeConfigurationCommandLocationParentOverrideVisitor{}) +var k8sLikeComponentLocationParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationParentOverrideVisitor{}) -func (union VscodeConfigurationCommandLocationParentOverride) Visit(visitor VscodeConfigurationCommandLocationParentOverrideVisitor) error { +func (union K8sLikeComponentLocationParentOverride) Visit(visitor K8sLikeComponentLocationParentOverrideVisitor) error { return visitUnion(union, visitor) } -func (union *VscodeConfigurationCommandLocationParentOverride) discriminator() *string { +func (union *K8sLikeComponentLocationParentOverride) discriminator() *string { return (*string)(&union.LocationType) } -func (union *VscodeConfigurationCommandLocationParentOverride) Normalize() error { - return normalizeUnion(union, vscodeConfigurationCommandLocationParentOverride) +func (union *K8sLikeComponentLocationParentOverride) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocationParentOverride) } -func (union *VscodeConfigurationCommandLocationParentOverride) Simplify() { - simplifyUnion(union, vscodeConfigurationCommandLocationParentOverride) +func (union *K8sLikeComponentLocationParentOverride) Simplify() { + simplifyUnion(union, 
k8sLikeComponentLocationParentOverride) } // +k8s:deepcopy-gen=false -type VscodeConfigurationCommandLocationParentOverrideVisitor struct { +type K8sLikeComponentLocationParentOverrideVisitor struct { Uri func(string) error Inlined func(string) error } -var k8sLikeComponentLocationParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationParentOverrideVisitor{}) +var imageUnionParentOverride reflect.Type = reflect.TypeOf(ImageUnionParentOverrideVisitor{}) -func (union K8sLikeComponentLocationParentOverride) Visit(visitor K8sLikeComponentLocationParentOverrideVisitor) error { +func (union ImageUnionParentOverride) Visit(visitor ImageUnionParentOverrideVisitor) error { return visitUnion(union, visitor) } -func (union *K8sLikeComponentLocationParentOverride) discriminator() *string { - return (*string)(&union.LocationType) +func (union *ImageUnionParentOverride) discriminator() *string { + return (*string)(&union.ImageType) } -func (union *K8sLikeComponentLocationParentOverride) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocationParentOverride) +func (union *ImageUnionParentOverride) Normalize() error { + return normalizeUnion(union, imageUnionParentOverride) } -func (union *K8sLikeComponentLocationParentOverride) Simplify() { - simplifyUnion(union, k8sLikeComponentLocationParentOverride) +func (union *ImageUnionParentOverride) Simplify() { + simplifyUnion(union, imageUnionParentOverride) } // +k8s:deepcopy-gen=false -type K8sLikeComponentLocationParentOverrideVisitor struct { - Uri func(string) error - Inlined func(string) error +type ImageUnionParentOverrideVisitor struct { + Dockerfile func(*DockerfileImageParentOverride) error } var importReferenceUnionParentOverride reflect.Type = reflect.TypeOf(ImportReferenceUnionParentOverrideVisitor{}) @@ -296,6 +312,7 @@ type ComponentUnionPluginOverrideParentOverrideVisitor struct { Kubernetes func(*KubernetesComponentPluginOverrideParentOverride) error Openshift 
func(*OpenshiftComponentPluginOverrideParentOverride) error Volume func(*VolumeComponentPluginOverrideParentOverride) error + Image func(*ImageComponentPluginOverrideParentOverride) error } var commandUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideParentOverrideVisitor{}) @@ -315,32 +332,31 @@ func (union *CommandUnionPluginOverrideParentOverride) Simplify() { // +k8s:deepcopy-gen=false type CommandUnionPluginOverrideParentOverrideVisitor struct { - Exec func(*ExecCommandPluginOverrideParentOverride) error - Apply func(*ApplyCommandPluginOverrideParentOverride) error - VscodeTask func(*VscodeConfigurationCommandPluginOverrideParentOverride) error - VscodeLaunch func(*VscodeConfigurationCommandPluginOverrideParentOverride) error - Composite func(*CompositeCommandPluginOverrideParentOverride) error + Exec func(*ExecCommandPluginOverrideParentOverride) error + Apply func(*ApplyCommandPluginOverrideParentOverride) error + Composite func(*CompositeCommandPluginOverrideParentOverride) error } -var vscodeConfigurationCommandLocationPluginOverrideParentOverride reflect.Type = reflect.TypeOf(VscodeConfigurationCommandLocationPluginOverrideParentOverrideVisitor{}) +var dockerfileSrcParentOverride reflect.Type = reflect.TypeOf(DockerfileSrcParentOverrideVisitor{}) -func (union VscodeConfigurationCommandLocationPluginOverrideParentOverride) Visit(visitor VscodeConfigurationCommandLocationPluginOverrideParentOverrideVisitor) error { +func (union DockerfileSrcParentOverride) Visit(visitor DockerfileSrcParentOverrideVisitor) error { return visitUnion(union, visitor) } -func (union *VscodeConfigurationCommandLocationPluginOverrideParentOverride) discriminator() *string { - return (*string)(&union.LocationType) +func (union *DockerfileSrcParentOverride) discriminator() *string { + return (*string)(&union.SrcType) } -func (union *VscodeConfigurationCommandLocationPluginOverrideParentOverride) Normalize() error { - return 
normalizeUnion(union, vscodeConfigurationCommandLocationPluginOverrideParentOverride) +func (union *DockerfileSrcParentOverride) Normalize() error { + return normalizeUnion(union, dockerfileSrcParentOverride) } -func (union *VscodeConfigurationCommandLocationPluginOverrideParentOverride) Simplify() { - simplifyUnion(union, vscodeConfigurationCommandLocationPluginOverrideParentOverride) +func (union *DockerfileSrcParentOverride) Simplify() { + simplifyUnion(union, dockerfileSrcParentOverride) } // +k8s:deepcopy-gen=false -type VscodeConfigurationCommandLocationPluginOverrideParentOverrideVisitor struct { - Uri func(string) error - Inlined func(string) error +type DockerfileSrcParentOverrideVisitor struct { + Uri func(string) error + DevfileRegistry func(*DockerfileDevfileRegistrySourceParentOverride) error + Git func(*DockerfileGitProjectSourceParentOverride) error } var k8sLikeComponentLocationPluginOverrideParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideParentOverrideVisitor{}) @@ -364,6 +380,48 @@ type K8sLikeComponentLocationPluginOverrideParentOverrideVisitor struct { Inlined func(string) error } +var imageUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(ImageUnionPluginOverrideParentOverrideVisitor{}) + +func (union ImageUnionPluginOverrideParentOverride) Visit(visitor ImageUnionPluginOverrideParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *ImageUnionPluginOverrideParentOverride) discriminator() *string { + return (*string)(&union.ImageType) +} +func (union *ImageUnionPluginOverrideParentOverride) Normalize() error { + return normalizeUnion(union, imageUnionPluginOverrideParentOverride) +} +func (union *ImageUnionPluginOverrideParentOverride) Simplify() { + simplifyUnion(union, imageUnionPluginOverrideParentOverride) +} + +// +k8s:deepcopy-gen=false +type ImageUnionPluginOverrideParentOverrideVisitor struct { + Dockerfile func(*DockerfileImagePluginOverrideParentOverride) 
error +} + +var dockerfileSrcPluginOverrideParentOverride reflect.Type = reflect.TypeOf(DockerfileSrcPluginOverrideParentOverrideVisitor{}) + +func (union DockerfileSrcPluginOverrideParentOverride) Visit(visitor DockerfileSrcPluginOverrideParentOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *DockerfileSrcPluginOverrideParentOverride) discriminator() *string { + return (*string)(&union.SrcType) +} +func (union *DockerfileSrcPluginOverrideParentOverride) Normalize() error { + return normalizeUnion(union, dockerfileSrcPluginOverrideParentOverride) +} +func (union *DockerfileSrcPluginOverrideParentOverride) Simplify() { + simplifyUnion(union, dockerfileSrcPluginOverrideParentOverride) +} + +// +k8s:deepcopy-gen=false +type DockerfileSrcPluginOverrideParentOverrideVisitor struct { + Uri func(string) error + DevfileRegistry func(*DockerfileDevfileRegistrySourcePluginOverrideParentOverride) error + Git func(*DockerfileGitProjectSourcePluginOverrideParentOverride) error +} + var componentUnionPluginOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideVisitor{}) func (union ComponentUnionPluginOverride) Visit(visitor ComponentUnionPluginOverrideVisitor) error { @@ -385,6 +443,7 @@ type ComponentUnionPluginOverrideVisitor struct { Kubernetes func(*KubernetesComponentPluginOverride) error Openshift func(*OpenshiftComponentPluginOverride) error Volume func(*VolumeComponentPluginOverride) error + Image func(*ImageComponentPluginOverride) error } var commandUnionPluginOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideVisitor{}) @@ -404,51 +463,70 @@ func (union *CommandUnionPluginOverride) Simplify() { // +k8s:deepcopy-gen=false type CommandUnionPluginOverrideVisitor struct { - Exec func(*ExecCommandPluginOverride) error - Apply func(*ApplyCommandPluginOverride) error - VscodeTask func(*VscodeConfigurationCommandPluginOverride) error - VscodeLaunch func(*VscodeConfigurationCommandPluginOverride) error - Composite 
func(*CompositeCommandPluginOverride) error + Exec func(*ExecCommandPluginOverride) error + Apply func(*ApplyCommandPluginOverride) error + Composite func(*CompositeCommandPluginOverride) error } -var vscodeConfigurationCommandLocationPluginOverride reflect.Type = reflect.TypeOf(VscodeConfigurationCommandLocationPluginOverrideVisitor{}) +var k8sLikeComponentLocationPluginOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideVisitor{}) -func (union VscodeConfigurationCommandLocationPluginOverride) Visit(visitor VscodeConfigurationCommandLocationPluginOverrideVisitor) error { +func (union K8sLikeComponentLocationPluginOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideVisitor) error { return visitUnion(union, visitor) } -func (union *VscodeConfigurationCommandLocationPluginOverride) discriminator() *string { +func (union *K8sLikeComponentLocationPluginOverride) discriminator() *string { return (*string)(&union.LocationType) } -func (union *VscodeConfigurationCommandLocationPluginOverride) Normalize() error { - return normalizeUnion(union, vscodeConfigurationCommandLocationPluginOverride) +func (union *K8sLikeComponentLocationPluginOverride) Normalize() error { + return normalizeUnion(union, k8sLikeComponentLocationPluginOverride) } -func (union *VscodeConfigurationCommandLocationPluginOverride) Simplify() { - simplifyUnion(union, vscodeConfigurationCommandLocationPluginOverride) +func (union *K8sLikeComponentLocationPluginOverride) Simplify() { + simplifyUnion(union, k8sLikeComponentLocationPluginOverride) } // +k8s:deepcopy-gen=false -type VscodeConfigurationCommandLocationPluginOverrideVisitor struct { +type K8sLikeComponentLocationPluginOverrideVisitor struct { Uri func(string) error Inlined func(string) error } -var k8sLikeComponentLocationPluginOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideVisitor{}) +var imageUnionPluginOverride reflect.Type = reflect.TypeOf(ImageUnionPluginOverrideVisitor{}) 
-func (union K8sLikeComponentLocationPluginOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideVisitor) error { +func (union ImageUnionPluginOverride) Visit(visitor ImageUnionPluginOverrideVisitor) error { return visitUnion(union, visitor) } -func (union *K8sLikeComponentLocationPluginOverride) discriminator() *string { - return (*string)(&union.LocationType) +func (union *ImageUnionPluginOverride) discriminator() *string { + return (*string)(&union.ImageType) } -func (union *K8sLikeComponentLocationPluginOverride) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocationPluginOverride) +func (union *ImageUnionPluginOverride) Normalize() error { + return normalizeUnion(union, imageUnionPluginOverride) } -func (union *K8sLikeComponentLocationPluginOverride) Simplify() { - simplifyUnion(union, k8sLikeComponentLocationPluginOverride) +func (union *ImageUnionPluginOverride) Simplify() { + simplifyUnion(union, imageUnionPluginOverride) } // +k8s:deepcopy-gen=false -type K8sLikeComponentLocationPluginOverrideVisitor struct { - Uri func(string) error - Inlined func(string) error +type ImageUnionPluginOverrideVisitor struct { + Dockerfile func(*DockerfileImagePluginOverride) error +} + +var dockerfileSrcPluginOverride reflect.Type = reflect.TypeOf(DockerfileSrcPluginOverrideVisitor{}) + +func (union DockerfileSrcPluginOverride) Visit(visitor DockerfileSrcPluginOverrideVisitor) error { + return visitUnion(union, visitor) +} +func (union *DockerfileSrcPluginOverride) discriminator() *string { + return (*string)(&union.SrcType) +} +func (union *DockerfileSrcPluginOverride) Normalize() error { + return normalizeUnion(union, dockerfileSrcPluginOverride) +} +func (union *DockerfileSrcPluginOverride) Simplify() { + simplifyUnion(union, dockerfileSrcPluginOverride) +} + +// +k8s:deepcopy-gen=false +type DockerfileSrcPluginOverrideVisitor struct { + Uri func(string) error + DevfileRegistry func(*DockerfileDevfileRegistrySourcePluginOverride) error + Git 
func(*DockerfileGitProjectSourcePluginOverride) error } diff --git a/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go b/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go index 96186c47717..43c77c4e6ee 100644 --- a/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go +++ b/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go @@ -19,7 +19,7 @@ func (attributes Attributes) MarshalJSON() ([]byte, error) { return json.Marshal(map[string]apiext.JSON(attributes)) } -// UnmarshalJSON implements custom JSON unmarshaling +// UnmarshalJSON implements custom JSON unmarshalling // to support free-form attributes func (attributes *Attributes) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, (*map[string]apiext.JSON)(attributes)) diff --git a/vendor/github.com/devfile/api/v2/pkg/devfile/header.go b/vendor/github.com/devfile/api/v2/pkg/devfile/header.go index 23cccd04b9b..6606b068ae1 100644 --- a/vendor/github.com/devfile/api/v2/pkg/devfile/header.go +++ b/vendor/github.com/devfile/api/v2/pkg/devfile/header.go @@ -17,6 +17,17 @@ type DevfileHeader struct { Metadata DevfileMetadata `json:"metadata,omitempty"` } +// Architecture describes the architecture type +// +kubebuilder:validation:Enum=amd64;arm64;ppc64le;s390x +type Architecture string + +const ( + AMD64 Architecture = "amd64" + ARM64 Architecture = "arm64" + PPC64LE Architecture = "ppc64le" + S390X Architecture = "s390x" +) + type DevfileMetadata struct { // Optional devfile name // +optional @@ -27,8 +38,11 @@ type DevfileMetadata struct { // +kubebuilder:validation:Pattern=^([0-9]+)\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$ Version string `json:"version,omitempty"` - // Map of implementation-dependant free-form YAML attributes. + // Map of implementation-dependant free-form YAML attributes. Deprecated, use the top-level attributes field instead. 
// +optional + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + // +kubebuilder:validation:Schemaless Attributes attributes.Attributes `json:"attributes,omitempty"` // Optional devfile display name @@ -43,11 +57,36 @@ type DevfileMetadata struct { // +optional Tags []string `json:"tags,omitempty"` - // Optional devfile icon + // Optional list of processor architectures that the devfile supports, empty list suggests that the devfile can be used on any architecture + // +optional + // +kubebuilder:validation:UniqueItems=true + Architectures []Architecture `json:"architectures,omitempty"` + + // Optional devfile icon, can be a URI or a relative path in the project // +optional Icon string `json:"icon,omitempty"` // Optional devfile global memory limit // +optional GlobalMemoryLimit string `json:"globalMemoryLimit,omitempty"` + + // Optional devfile project type + // +optional + ProjectType string `json:"projectType,omitempty"` + + // Optional devfile language + // +optional + Language string `json:"language,omitempty"` + + // Optional devfile website + // +optional + Website string `json:"website,omitempty"` + + // Optional devfile provider information + // +optional + Provider string `json:"provider,omitempty"` + + // Optional link to a page that provides support information + // +optional + SupportUrl string `json:"supportUrl,omitempty"` } diff --git a/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go b/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go index cca05ca9685..b525e8a7b61 100644 --- a/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go +++ b/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go @@ -1,7 +1,11 @@ package overriding import ( - workspaces "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "fmt" + "reflect" + + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + attributesPkg "github.com/devfile/api/v2/pkg/attributes" 
"github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/util/sets" ) @@ -14,7 +18,7 @@ type checkFn func(elementType string, keysSets []sets.String) []error // For each type of top-level list, the `keysSets` argument that will be passed to the `doCheck` function // contains the the key sets that correspond to the `toplevelListContainers` passed to this method, // in the same order. -func checkKeys(doCheck checkFn, toplevelListContainers ...workspaces.TopLevelListContainer) error { +func checkKeys(doCheck checkFn, toplevelListContainers ...dw.TopLevelListContainer) error { var errors *multierror.Error // intermediate storage for the conversion []map[string]KeyedList -> map[string][]sets.String @@ -27,7 +31,49 @@ func checkKeys(doCheck checkFn, toplevelListContainers ...workspaces.TopLevelLis for listType, listElem := range topLevelList { listTypeToKeys[listType] = append(listTypeToKeys[listType], sets.NewString(listElem.GetKeys()...)) } + + value := reflect.ValueOf(topLevelListContainer) + + var variableValue reflect.Value + var attributeValue reflect.Value + + // toplevelListContainers can contain either a pointer or a struct and needs to be safeguarded when using reflect + if value.Kind() == reflect.Ptr { + variableValue = value.Elem().FieldByName("Variables") + attributeValue = value.Elem().FieldByName("Attributes") + } else { + variableValue = value.FieldByName("Variables") + attributeValue = value.FieldByName("Attributes") + } + + if variableValue.IsValid() && variableValue.Kind() == reflect.Map { + mapIter := variableValue.MapRange() + + var variableKeys []string + for mapIter.Next() { + k := mapIter.Key() + v := mapIter.Value() + if k.Kind() != reflect.String || v.Kind() != reflect.String { + return fmt.Errorf("unable to fetch top-level Variables, top-level Variables should be map of strings") + } + variableKeys = append(variableKeys, k.String()) + } + listTypeToKeys["Variables"] = append(listTypeToKeys["Variables"], sets.NewString(variableKeys...)) + } 
+ + if attributeValue.IsValid() && attributeValue.CanInterface() { + attributes, ok := attributeValue.Interface().(attributesPkg.Attributes) + if !ok { + return fmt.Errorf("unable to fetch top-level Attributes from the devfile data") + } + var attributeKeys []string + for k := range attributes { + attributeKeys = append(attributeKeys, k) + } + listTypeToKeys["Attributes"] = append(listTypeToKeys["Attributes"], sets.NewString(attributeKeys...)) + } } + for listType, keySets := range listTypeToKeys { errors = multierror.Append(errors, doCheck(listType, keySets)...) } diff --git a/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go b/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go index 2ce6fc60ce8..279e54de91a 100644 --- a/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go +++ b/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go @@ -5,7 +5,8 @@ import ( "reflect" "strings" - workspaces "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/api/v2/pkg/attributes" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/yaml" @@ -21,11 +22,11 @@ import ( // The result is a transformed `DevWorkspaceTemplateSpec` object, that does not contain any `plugin` component // (since they are expected to be provided as flattened overridden devfiles in the arguments) func MergeDevWorkspaceTemplateSpec( - mainContent *workspaces.DevWorkspaceTemplateSpecContent, - parentFlattenedContent *workspaces.DevWorkspaceTemplateSpecContent, - pluginFlattenedContents ...*workspaces.DevWorkspaceTemplateSpecContent) (*workspaces.DevWorkspaceTemplateSpecContent, error) { + mainContent *dw.DevWorkspaceTemplateSpecContent, + parentFlattenedContent *dw.DevWorkspaceTemplateSpecContent, + pluginFlattenedContents ...*dw.DevWorkspaceTemplateSpecContent) (*dw.DevWorkspaceTemplateSpecContent, error) { - allContents := 
[]*workspaces.DevWorkspaceTemplateSpecContent{} + allContents := []*dw.DevWorkspaceTemplateSpecContent{} if parentFlattenedContent != nil { allContents = append(allContents, parentFlattenedContent) } @@ -52,12 +53,12 @@ func MergeDevWorkspaceTemplateSpec( } } - result := workspaces.DevWorkspaceTemplateSpecContent{} + result := dw.DevWorkspaceTemplateSpecContent{} // Merge top-level lists (Commands, Projects, Components, etc ...) topLevelListsNames := result.GetToplevelLists() - topLevelListsByContent := []workspaces.TopLevelLists{} + topLevelListsByContent := []dw.TopLevelLists{} for _, content := range allContents { topLevelListsByContent = append(topLevelListsByContent, content.GetToplevelLists()) } @@ -78,7 +79,7 @@ func MergeDevWorkspaceTemplateSpec( keyedList := toplevelLists[toplevelListName] for _, keyed := range keyedList { if content == mainContent { - if component, isComponent := keyed.(workspaces.Component); isComponent && + if component, isComponent := keyed.(dw.Component); isComponent && component.Plugin != nil { continue } @@ -98,13 +99,35 @@ func MergeDevWorkspaceTemplateSpec( for _, content := range allContents { if content.Events != nil { if result.Events == nil { - result.Events = &workspaces.Events{} + result.Events = &dw.Events{} } preStartCommands = preStartCommands.Union(sets.NewString(content.Events.PreStart...)) postStartCommands = postStartCommands.Union(sets.NewString(content.Events.PostStart...)) preStopCommands = preStopCommands.Union(sets.NewString(content.Events.PreStop...)) postStopCommands = postStopCommands.Union(sets.NewString(content.Events.PostStop...)) } + + if len(content.Variables) > 0 { + if len(result.Variables) == 0 { + result.Variables = make(map[string]string) + } + for k, v := range content.Variables { + result.Variables[k] = v + } + } + + var err error + if len(content.Attributes) > 0 { + if len(result.Attributes) == 0 { + result.Attributes = attributes.Attributes{} + } + for k, v := range content.Attributes { + 
result.Attributes.Put(k, v, &err) + if err != nil { + return nil, err + } + } + } } if result.Events != nil { @@ -126,13 +149,13 @@ func MergeDevWorkspaceTemplateSpec( // // The result is a transformed `DevfileWorkspaceTemplateSpec` object, that does not contain any `plugin` component // (since they are expected to be provided as flattened overridden devfiles in the arguments) -func MergeDevWorkspaceTemplateSpecBytes(originalBytes []byte, flattenedParentBytes []byte, flattenPluginsBytes ...[]byte) (*workspaces.DevWorkspaceTemplateSpecContent, error) { +func MergeDevWorkspaceTemplateSpecBytes(originalBytes []byte, flattenedParentBytes []byte, flattenPluginsBytes ...[]byte) (*dw.DevWorkspaceTemplateSpecContent, error) { originalJson, err := yaml.ToJSON(originalBytes) if err != nil { return nil, err } - original := workspaces.DevWorkspaceTemplateSpecContent{} + original := dw.DevWorkspaceTemplateSpecContent{} err = json.Unmarshal(originalJson, &original) if err != nil { return nil, err @@ -143,20 +166,20 @@ func MergeDevWorkspaceTemplateSpecBytes(originalBytes []byte, flattenedParentByt return nil, err } - flattenedParent := workspaces.DevWorkspaceTemplateSpecContent{} + flattenedParent := dw.DevWorkspaceTemplateSpecContent{} err = json.Unmarshal(flattenedParentJson, &flattenedParent) if err != nil { return nil, err } - flattenedPlugins := []*workspaces.DevWorkspaceTemplateSpecContent{} + flattenedPlugins := []*dw.DevWorkspaceTemplateSpecContent{} for _, flattenedPluginBytes := range flattenPluginsBytes { flattenedPluginJson, err := yaml.ToJSON(flattenedPluginBytes) if err != nil { return nil, err } - flattenedPlugin := workspaces.DevWorkspaceTemplateSpecContent{} + flattenedPlugin := dw.DevWorkspaceTemplateSpecContent{} err = json.Unmarshal(flattenedPluginJson, &flattenedPlugin) if err != nil { return nil, err @@ -167,7 +190,7 @@ func MergeDevWorkspaceTemplateSpecBytes(originalBytes []byte, flattenedParentByt return MergeDevWorkspaceTemplateSpec(&original, 
&flattenedParent, flattenedPlugins...) } -func ensureNoConflictWithParent(mainContent *workspaces.DevWorkspaceTemplateSpecContent, parentflattenedContent *workspaces.DevWorkspaceTemplateSpecContent) error { +func ensureNoConflictWithParent(mainContent *dw.DevWorkspaceTemplateSpecContent, parentflattenedContent *dw.DevWorkspaceTemplateSpecContent) error { return checkKeys(func(elementType string, keysSets []sets.String) []error { mainKeys := keysSets[0] parentOrPluginKeys := keysSets[1] @@ -183,7 +206,7 @@ func ensureNoConflictWithParent(mainContent *workspaces.DevWorkspaceTemplateSpec mainContent, parentflattenedContent) } -func ensureNoConflictsWithPlugins(mainContent *workspaces.DevWorkspaceTemplateSpecContent, pluginFlattenedContents ...*workspaces.DevWorkspaceTemplateSpecContent) error { +func ensureNoConflictsWithPlugins(mainContent *dw.DevWorkspaceTemplateSpecContent, pluginFlattenedContents ...*dw.DevWorkspaceTemplateSpecContent) error { getPluginKey := func(pluginIndex int) string { index := 0 for _, comp := range mainContent.Components { @@ -197,7 +220,7 @@ func ensureNoConflictsWithPlugins(mainContent *workspaces.DevWorkspaceTemplateSp return "unknown" } - allSpecs := []workspaces.TopLevelListContainer{mainContent} + allSpecs := []dw.TopLevelListContainer{mainContent} for _, pluginFlattenedContent := range pluginFlattenedContents { allSpecs = append(allSpecs, pluginFlattenedContent) } diff --git a/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go b/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go index 6eba4c41784..1a6f1d25cd2 100644 --- a/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go +++ b/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go @@ -5,7 +5,7 @@ import ( "reflect" "strings" - workspaces "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" unions "github.com/devfile/api/v2/pkg/utils/unions" 
"k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/sets" @@ -21,13 +21,13 @@ import ( // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#background // // The result is a transformed `DevfileWorkspaceTemplateSpec` object that can be serialized back to yaml or json. -func OverrideDevWorkspaceTemplateSpecBytes(originalBytes []byte, patchBytes []byte) (*workspaces.DevWorkspaceTemplateSpecContent, error) { +func OverrideDevWorkspaceTemplateSpecBytes(originalBytes []byte, patchBytes []byte) (*dw.DevWorkspaceTemplateSpecContent, error) { originalJson, err := yaml.ToJSON(originalBytes) if err != nil { return nil, err } - original := workspaces.DevWorkspaceTemplateSpecContent{} + original := dw.DevWorkspaceTemplateSpecContent{} err = json.Unmarshal(originalJson, &original) if err != nil { return nil, err @@ -38,7 +38,7 @@ func OverrideDevWorkspaceTemplateSpecBytes(originalBytes []byte, patchBytes []by return nil, err } - patch := workspaces.ParentOverrides{} + patch := dw.ParentOverrides{} err = json.Unmarshal(patchJson, &patch) if err != nil { return nil, err @@ -56,7 +56,7 @@ func OverrideDevWorkspaceTemplateSpecBytes(originalBytes []byte, patchBytes []by // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#background // // The result is a transformed `DevfileWorkspaceTemplateSpec` object. 
-func OverrideDevWorkspaceTemplateSpec(original *workspaces.DevWorkspaceTemplateSpecContent, patch workspaces.Overrides) (*workspaces.DevWorkspaceTemplateSpecContent, error) { +func OverrideDevWorkspaceTemplateSpec(original *dw.DevWorkspaceTemplateSpecContent, patch dw.Overrides) (*dw.DevWorkspaceTemplateSpecContent, error) { if err := ensureOnlyExistingElementsAreOverridden(original, patch); err != nil { return nil, err } @@ -102,7 +102,7 @@ func OverrideDevWorkspaceTemplateSpec(original *workspaces.DevWorkspaceTemplateS return nil, err } - patched := workspaces.DevWorkspaceTemplateSpecContent{} + patched := dw.DevWorkspaceTemplateSpecContent{} err = json.Unmarshal(patchedBytes, &patched) if err != nil { return nil, err @@ -114,7 +114,7 @@ func OverrideDevWorkspaceTemplateSpec(original *workspaces.DevWorkspaceTemplateS return &patched, nil } -func ensureOnlyExistingElementsAreOverridden(spec *workspaces.DevWorkspaceTemplateSpecContent, overrides workspaces.Overrides) error { +func ensureOnlyExistingElementsAreOverridden(spec *dw.DevWorkspaceTemplateSpecContent, overrides dw.Overrides) error { return checkKeys(func(elementType string, keysSets []sets.String) []error { if len(keysSets) <= 1 { return []error{} diff --git a/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go b/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go index 25ee6725055..96d6e486eae 100644 --- a/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go +++ b/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go @@ -3,7 +3,7 @@ package unions import ( "reflect" - workspaces "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/mitchellh/reflectwalk" ) @@ -15,7 +15,7 @@ func (n *normalizer) Struct(s reflect.Value) error { addr := s.Addr() if addr.CanInterface() { i := addr.Interface() - if u, ok := i.(workspaces.Union); ok { + if u, ok := i.(dw.Union); ok { u.Normalize() } } @@ -34,7 
+34,7 @@ func (n *simplifier) Struct(s reflect.Value) error { addr := s.Addr() if addr.CanInterface() { i := addr.Interface() - if u, ok := i.(workspaces.Union); ok { + if u, ok := i.(dw.Union); ok { u.Simplify() } } diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/commands.go b/vendor/github.com/devfile/api/v2/pkg/validation/commands.go index 9fc4e2270e3..1ebf6d3161f 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/commands.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/commands.go @@ -5,27 +5,28 @@ import ( "strings" "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/hashicorp/go-multierror" ) // ValidateCommands validates the devfile commands and checks: // 1. there are no duplicate command ids // 2. the command type is not invalid // 3. if a command is part of a command group, there is a single default command -func ValidateCommands(commands []v1alpha2.Command, components []v1alpha2.Component) (err error) { +func ValidateCommands(commands []v1alpha2.Command, components []v1alpha2.Component) (returnedErr error) { groupKindCommandMap := make(map[v1alpha2.CommandGroupKind][]v1alpha2.Command) commandMap := getCommandsMap(commands) - err = v1alpha2.CheckDuplicateKeys(commands) + err := v1alpha2.CheckDuplicateKeys(commands) if err != nil { - return err + returnedErr = multierror.Append(returnedErr, err) } for _, command := range commands { // parentCommands is a map to keep a track of all the parent commands when validating the composite command's subcommands recursively parentCommands := make(map[string]string) - err = validateCommand(command, parentCommands, commandMap, components) + err := validateCommand(command, parentCommands, commandMap, components) if err != nil { - return err + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, command.Attributes)) } commandGroup := getGroup(command) @@ -34,54 +35,42 @@ func ValidateCommands(commands []v1alpha2.Command, components 
[]v1alpha2.Compone } } - groupErrors := "" for groupKind, commands := range groupKindCommandMap { - if err = validateGroup(commands); err != nil { - groupErrors += fmt.Sprintf("\ncommand group %s error - %s", groupKind, err.Error()) + if err := validateGroup(commands, groupKind); err != nil { + returnedErr = multierror.Append(returnedErr, err) } } - if len(groupErrors) > 0 { - err = fmt.Errorf("%s", groupErrors) - } - - return err + return returnedErr } // validateCommand validates a given devfile command where parentCommands is a map to track all the parent commands when validating // the composite command's subcommands recursively and devfileCommands is a map of command id to the devfile command -func validateCommand(command v1alpha2.Command, parentCommands map[string]string, devfileCommands map[string]v1alpha2.Command, components []v1alpha2.Component) (err error) { +func validateCommand(command v1alpha2.Command, parentCommands map[string]string, devfileCommands map[string]v1alpha2.Command, components []v1alpha2.Component) error { switch { case command.Composite != nil: return validateCompositeCommand(&command, parentCommands, devfileCommands, components) case command.Exec != nil || command.Apply != nil: return validateCommandComponent(command, components) - case command.VscodeLaunch != nil: - if command.VscodeLaunch.Uri != "" { - return ValidateURI(command.VscodeLaunch.Uri) - } - case command.VscodeTask != nil: - if command.VscodeTask.Uri != "" { - return ValidateURI(command.VscodeTask.Uri) - } default: - err = fmt.Errorf("command %s type is invalid", command.Id) + return &InvalidCommandTypeError{commandId: command.Id} } - return err } // validateGroup validates commands belonging to a specific group kind. If there are multiple commands belonging to the same group: // 1. without any default, err out // 2. 
with more than one default, err out -func validateGroup(commands []v1alpha2.Command) error { +func validateGroup(commands []v1alpha2.Command, groupKind v1alpha2.CommandGroupKind) error { defaultCommandCount := 0 - + var defaultCommands []v1alpha2.Command if len(commands) > 1 { for _, command := range commands { - if getGroup(command).IsDefault { + defaultVal := getGroup(command).IsDefault + if defaultVal != nil && *defaultVal { defaultCommandCount++ + defaultCommands = append(defaultCommands, command) } } } else { @@ -89,9 +78,20 @@ func validateGroup(commands []v1alpha2.Command) error { } if defaultCommandCount == 0 { - return fmt.Errorf("there should be exactly one default command, currently there is no default command") + return &MissingDefaultCmdWarning{groupKind: groupKind} } else if defaultCommandCount > 1 { - return fmt.Errorf("there should be exactly one default command, currently there is more than one default command") + var commandsReferenceList []string + for _, command := range defaultCommands { + commandsReferenceList = append(commandsReferenceList, + resolveErrorMessageWithImportAttributes(fmt.Errorf("command: %s", command.Id), command.Attributes).Error()) + } + commandsReference := strings.Join(commandsReferenceList, "; ") + // example: there should be exactly one default command, currently there are multiple commands; + // command: ; command: , imported from uri: http://127.0.0.1:8080, in parent overrides from main devfile" + return &MultipleDefaultCmdError{ + groupKind: groupKind, + commandsReference: commandsReference, + } } return nil @@ -106,10 +106,6 @@ func getGroup(command v1alpha2.Command) *v1alpha2.CommandGroup { return command.Exec.Group case command.Apply != nil: return command.Apply.Group - case command.VscodeLaunch != nil: - return command.VscodeLaunch.Group - case command.VscodeTask != nil: - return command.VscodeTask.Group case command.Custom != nil: return command.Custom.Group @@ -132,13 +128,20 @@ func 
validateCommandComponent(command v1alpha2.Command, components []v1alpha2.Co commandComponent = command.Apply.Component } - // must map to a container component + // exec command must map to a container component + // apply command must map to a container/kubernetes/openshift/image component for _, component := range components { - if component.Container != nil && commandComponent == component.Name { - return nil + if commandComponent == component.Name { + if component.Container != nil { + return nil + } + if command.Apply != nil && (component.Image != nil || component.Kubernetes != nil || component.Openshift != nil) { + return nil + } + break } } - return &InvalidCommandError{commandId: command.Id, reason: "command does not map to a container component"} + return &InvalidCommandError{commandId: command.Id, reason: "command does not map to a valid component"} } // validateCompositeCommand checks that the specified composite command is valid. The command: diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/components.go b/vendor/github.com/devfile/api/v2/pkg/validation/components.go index 73d29077800..10b3c89ad2f 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/components.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/components.go @@ -2,8 +2,10 @@ package validation import ( "fmt" + "strings" "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/hashicorp/go-multierror" "k8s.io/apimachinery/pkg/api/resource" ) @@ -20,16 +22,18 @@ const ( // 2. makes sure the volume components are unique // 3. checks the URI specified in openshift components and kubernetes components are with valid format // 4. makes sure the component name is unique -func ValidateComponents(components []v1alpha2.Component) error { +// 5. 
makes sure the image dockerfile component git src has at most one remote +func ValidateComponents(components []v1alpha2.Component) (returnedErr error) { processedVolumes := make(map[string]bool) processedVolumeMounts := make(map[string][]string) processedEndPointName := make(map[string]bool) processedEndPointPort := make(map[int]bool) + processedComponentWithVolumeMounts := make(map[string]v1alpha2.Component) err := v1alpha2.CheckDuplicateKeys(components) if err != nil { - return err + returnedErr = multierror.Append(returnedErr, err) } for _, component := range components { @@ -38,21 +42,26 @@ func ValidateComponents(components []v1alpha2.Component) error { // Process all the volume mounts in container components to validate them later for _, volumeMount := range component.Container.VolumeMounts { processedVolumeMounts[component.Name] = append(processedVolumeMounts[component.Name], volumeMount.Name) + processedComponentWithVolumeMounts[component.Name] = component } // Check if any containers are customizing the reserved PROJECT_SOURCE or PROJECTS_ROOT env for _, env := range component.Container.Env { if env.Name == EnvProjectsSrc { - return &ReservedEnvError{envName: EnvProjectsSrc, componentName: component.Name} + reservedEnvErr := &ReservedEnvError{envName: EnvProjectsSrc, componentName: component.Name} + returnedErr = multierror.Append(returnedErr, reservedEnvErr) } else if env.Name == EnvProjectsRoot { - return &ReservedEnvError{envName: EnvProjectsRoot, componentName: component.Name} + reservedEnvErr := &ReservedEnvError{envName: EnvProjectsRoot, componentName: component.Name} + returnedErr = multierror.Append(returnedErr, reservedEnvErr) } } err := validateEndpoints(component.Container.Endpoints, processedEndPointPort, processedEndPointName) - if err != nil { - return err + if len(err) > 0 { + for _, endpointErr := range err { + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(endpointErr, component.Attributes)) + } } case 
component.Volume != nil: processedVolumes[component.Name] = true @@ -61,37 +70,50 @@ func ValidateComponents(components []v1alpha2.Component) error { // express storage in Kubernetes. For reference, you may check doc // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ if _, err := resource.ParseQuantity(component.Volume.Size); err != nil { - return &InvalidVolumeError{name: component.Name, reason: fmt.Sprintf("size %s for volume component is invalid, %v. Example - 2Gi, 1024Mi", component.Volume.Size, err)} + invalidVolErr := &InvalidVolumeError{name: component.Name, reason: fmt.Sprintf("size %s for volume component is invalid, %v. Example - 2Gi, 1024Mi", component.Volume.Size, err)} + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(invalidVolErr, component.Attributes)) } } case component.Openshift != nil: if component.Openshift.Uri != "" { err := ValidateURI(component.Openshift.Uri) if err != nil { - return err + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) } } err := validateEndpoints(component.Openshift.Endpoints, processedEndPointPort, processedEndPointName) - if err != nil { - return err + if len(err) > 0 { + for _, endpointErr := range err { + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(endpointErr, component.Attributes)) + } } case component.Kubernetes != nil: if component.Kubernetes.Uri != "" { err := ValidateURI(component.Kubernetes.Uri) if err != nil { - return err + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) } } err := validateEndpoints(component.Kubernetes.Endpoints, processedEndPointPort, processedEndPointName) - if err != nil { - return err + if len(err) > 0 { + for _, endpointErr := range err { + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(endpointErr, 
component.Attributes)) + } + } + case component.Image != nil: + var gitSource v1alpha2.GitLikeProjectSource + if component.Image.Dockerfile != nil && component.Image.Dockerfile.Git != nil { + gitSource = component.Image.Dockerfile.Git.GitLikeProjectSource + if err := validateSingleRemoteGitSrc("component", component.Name, gitSource); err != nil { + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) + } } case component.Plugin != nil: if component.Plugin.RegistryUrl != "" { err := ValidateURI(component.Plugin.RegistryUrl) if err != nil { - return err + returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) } } } @@ -99,18 +121,21 @@ func ValidateComponents(components []v1alpha2.Component) error { } // Check if the volume mounts mentioned in the containers are referenced by a volume component - var invalidVolumeMountsErr string + var invalidVolumeMountsErrList []string for componentName, volumeMountNames := range processedVolumeMounts { for _, volumeMountName := range volumeMountNames { if !processedVolumes[volumeMountName] { - invalidVolumeMountsErr += fmt.Sprintf("\nvolume mount %s belonging to the container component %s", volumeMountName, componentName) + missingVolumeMountErr := fmt.Errorf("volume mount %s belonging to the container component %s", volumeMountName, componentName) + newErr := resolveErrorMessageWithImportAttributes(missingVolumeMountErr, processedComponentWithVolumeMounts[componentName].Attributes) + invalidVolumeMountsErrList = append(invalidVolumeMountsErrList, newErr.Error()) } } } - if len(invalidVolumeMountsErr) > 0 { - return &MissingVolumeMountError{errMsg: invalidVolumeMountsErr} + if len(invalidVolumeMountsErrList) > 0 { + invalidVolumeMountsErr := fmt.Sprintf("\n%s", strings.Join(invalidVolumeMountsErrList, "\n")) + returnedErr = multierror.Append(returnedErr, &MissingVolumeMountError{errMsg: invalidVolumeMountsErr}) } - 
return nil + return returnedErr } diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go b/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go index 4ca099e9ccf..9970c899d0d 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go @@ -7,12 +7,12 @@ import "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" // 2. endpoint port are unique across component containers // ie; two component containers cannot have the same target port but two endpoints // in a single component container can have the same target port -func validateEndpoints(endpoints []v1alpha2.Endpoint, processedEndPointPort map[int]bool, processedEndPointName map[string]bool) error { +func validateEndpoints(endpoints []v1alpha2.Endpoint, processedEndPointPort map[int]bool, processedEndPointName map[string]bool) (errList []error) { currentComponentEndPointPort := make(map[int]bool) for _, endPoint := range endpoints { if _, ok := processedEndPointName[endPoint.Name]; ok { - return &InvalidEndpointError{name: endPoint.Name} + errList = append(errList, &InvalidEndpointError{name: endPoint.Name}) } processedEndPointName[endPoint.Name] = true currentComponentEndPointPort[endPoint.TargetPort] = true @@ -20,9 +20,9 @@ func validateEndpoints(endpoints []v1alpha2.Endpoint, processedEndPointPort map[ for targetPort := range currentComponentEndPointPort { if _, ok := processedEndPointPort[targetPort]; ok { - return &InvalidEndpointError{port: targetPort} + errList = append(errList, &InvalidEndpointError{port: targetPort}) } processedEndPointPort[targetPort] = true } - return nil + return errList } diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/errors.go b/vendor/github.com/devfile/api/v2/pkg/validation/errors.go index ecca9ff663d..efb42a8c845 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/errors.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/errors.go @@ -1,6 
+1,11 @@ package validation -import "fmt" +import ( + "fmt" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + attributesAPI "github.com/devfile/api/v2/pkg/attributes" +) // InvalidEventError returns an error if the devfile event type has invalid events type InvalidEventError struct { @@ -22,6 +27,35 @@ func (e *InvalidCommandError) Error() string { return fmt.Sprintf("the command %q is invalid - %s", e.commandId, e.reason) } +// InvalidCommandError returns an error if the command is invalid +type InvalidCommandTypeError struct { + commandId string +} + +func (e *InvalidCommandTypeError) Error() string { + return fmt.Sprintf("command %s has invalid type", e.commandId) +} + +// MultipleDefaultCmdError returns an error if there are multiple default commands for a single group kind +type MultipleDefaultCmdError struct { + groupKind v1alpha2.CommandGroupKind + commandsReference string +} + +func (e *MultipleDefaultCmdError) Error() string { + return fmt.Sprintf("command group %s error - there should be exactly one default command, currently there are multiple default commands; %s", + e.groupKind, e.commandsReference) +} + +// MissingDefaultCmdWarning returns an error if there is no default command for a single group kind +type MissingDefaultCmdWarning struct { + groupKind v1alpha2.CommandGroupKind +} + +func (e *MissingDefaultCmdWarning) Error() string { + return fmt.Sprintf("command group %s warning - there should be exactly one default command, currently there is no default command", e.groupKind) +} + // ReservedEnvError returns an error if the user attempts to customize a reserved ENV in a container type ReservedEnvError struct { componentName string @@ -77,3 +111,84 @@ type InvalidComponentError struct { func (e *InvalidComponentError) Error() string { return fmt.Sprintf("the component %q is invalid - %s", e.componentName, e.reason) } + +//MissingProjectRemoteError returns an error if the git remotes object under a project is empty +type 
MissingProjectRemoteError struct { + projectName string +} + +func (e *MissingProjectRemoteError) Error() string { + return fmt.Sprintf("project %s should have at least one remote", e.projectName) +} + +//MissingRemoteError returns an error if the git remotes object is empty +type MissingRemoteError struct { + objectType string + objectName string +} + +func (e *MissingRemoteError) Error() string { + return fmt.Sprintf("%s %s should have at least one remote", e.objectType, e.objectName) +} + +//MultipleRemoteError returns an error if multiple git remotes are specified. There can only be one remote. +type MultipleRemoteError struct { + objectType string + objectName string +} + +func (e *MultipleRemoteError) Error() string { + return fmt.Sprintf("%s %s should have one remote only", e.objectType, e.objectName) +} + +//MissingProjectCheckoutFromRemoteError returns an error if there are multiple git remotes but the checkoutFrom remote has not been specified +type MissingProjectCheckoutFromRemoteError struct { + projectName string +} + +func (e *MissingProjectCheckoutFromRemoteError) Error() string { + return fmt.Sprintf("project %s has more than one remote defined, but has no checkoutfrom remote defined", e.projectName) +} + +//InvalidProjectCheckoutRemoteError returns an error if there is an unmatched, checkoutFrom remote specified +type InvalidProjectCheckoutRemoteError struct { + objectType string + objectName string + checkoutRemote string +} + +func (e *InvalidProjectCheckoutRemoteError) Error() string { + return fmt.Sprintf("unable to find the checkout remote %s in the remotes for %s %s", e.checkoutRemote, e.objectType, e.objectName) +} + +// resolveErrorMessageWithImportAttributes returns an updated error message +// with detailed information on the imported and overriden resource. 
+// example: +// "the component is invalid - , imported from Uri: http://example.com/devfile.yaml, in parent overrides from main devfile" +func resolveErrorMessageWithImportAttributes(validationErr error, attributes attributesAPI.Attributes) error { + var findKeyErr error + importReference := attributes.Get(ImportSourceAttribute, &findKeyErr) + + // overridden element must contain import resource information + // an overridden element can be either parentOverride or pluginOverride + // example: + // if an element is imported from another devfile, but contains no overrides - ImportSourceAttribute + // if an element is from parentOverride - ImportSourceAttribute + ParentOverrideAttribute + // if an element is from pluginOverride - ImportSourceAttribute + PluginOverrideAttribute + if findKeyErr == nil { + validationErr = fmt.Errorf("%s, imported from %s", validationErr.Error(), importReference) + parentOverrideReference := attributes.Get(ParentOverrideAttribute, &findKeyErr) + if findKeyErr == nil { + validationErr = fmt.Errorf("%s, in parent overrides from %s", validationErr.Error(), parentOverrideReference) + } else { + // reset findKeyErr to nil + findKeyErr = nil + pluginOverrideReference := attributes.Get(PluginOverrideAttribute, &findKeyErr) + if findKeyErr == nil { + validationErr = fmt.Errorf("%s, in plugin overrides from %s", validationErr.Error(), pluginOverrideReference) + } + } + } + + return validationErr +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/events.go b/vendor/github.com/devfile/api/v2/pkg/validation/events.go index 93cb084b975..e31e474ba72 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/events.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/events.go @@ -2,6 +2,7 @@ package validation import ( "fmt" + "github.com/hashicorp/go-multierror" "strings" "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" @@ -15,40 +16,33 @@ const ( ) // ValidateEvents validates all the devfile events -func 
ValidateEvents(events v1alpha2.Events, commands []v1alpha2.Command) error { - - eventErrors := "" +func ValidateEvents(events v1alpha2.Events, commands []v1alpha2.Command) (err error) { commandMap := getCommandsMap(commands) switch { case len(events.PreStart) > 0: if preStartErr := isEventValid(events.PreStart, preStart, commandMap); preStartErr != nil { - eventErrors += fmt.Sprintf("\n%s", preStartErr.Error()) + err = multierror.Append(err, preStartErr) } fallthrough case len(events.PostStart) > 0: if postStartErr := isEventValid(events.PostStart, postStart, commandMap); postStartErr != nil { - eventErrors += fmt.Sprintf("\n%s", postStartErr.Error()) + err = multierror.Append(err, postStartErr) } fallthrough case len(events.PreStop) > 0: if preStopErr := isEventValid(events.PreStop, preStop, commandMap); preStopErr != nil { - eventErrors += fmt.Sprintf("\n%s", preStopErr.Error()) + err = multierror.Append(err, preStopErr) } fallthrough case len(events.PostStop) > 0: if postStopErr := isEventValid(events.PostStop, postStop, commandMap); postStopErr != nil { - eventErrors += fmt.Sprintf("\n%s", postStopErr.Error()) + err = multierror.Append(err, postStopErr) } } - // if there is any validation error, return it - if len(eventErrors) > 0 { - return fmt.Errorf("devfile events validation error: %s", eventErrors) - } - - return nil + return err } // isEventValid checks if events belonging to a specific event type are valid ie; @@ -83,22 +77,23 @@ func isEventValid(eventNames []string, eventType string, commandMap map[string]v } } - var eventErrors string var err error + var eventErrorsList []string if len(invalidCommand) > 0 { - eventErrors = fmt.Sprintf("\n%s does not map to a valid devfile command", strings.Join(invalidCommand, ", ")) + eventErrorsList = append(eventErrorsList, fmt.Sprintf("%s does not map to a valid devfile command", strings.Join(invalidCommand, ", "))) } if len(invalidApplyEvents) > 0 { - eventErrors += fmt.Sprintf("\n%s should either map to an apply 
command or a composite command with apply commands", strings.Join(invalidApplyEvents, ", ")) + eventErrorsList = append(eventErrorsList, fmt.Sprintf("%s should either map to an apply command or a composite command with apply commands", strings.Join(invalidApplyEvents, ", "))) } if len(invalidExecEvents) > 0 { - eventErrors += fmt.Sprintf("\n%s should either map to an exec command or a composite command with exec commands", strings.Join(invalidExecEvents, ", ")) + eventErrorsList = append(eventErrorsList, fmt.Sprintf("%s should either map to an exec command or a composite command with exec commands", strings.Join(invalidExecEvents, ", "))) } - if len(eventErrors) != 0 { + if len(eventErrorsList) != 0 { + eventErrors := fmt.Sprintf("\n%s", strings.Join(eventErrorsList, "\n")) err = &InvalidEventError{eventType: eventType, errorMsg: eventErrors} } diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/projects.go b/vendor/github.com/devfile/api/v2/pkg/validation/projects.go index ea4e8880d91..4cbc875c0df 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/projects.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/projects.go @@ -1,98 +1,94 @@ package validation import ( - "fmt" - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/hashicorp/go-multierror" ) // ValidateStarterProjects checks if starter project has only one remote configured -// and if the checkout remote matches the renote configured -func ValidateStarterProjects(starterProjects []v1alpha2.StarterProject) error { +// and if the checkout remote matches the remote configured +func ValidateStarterProjects(starterProjects []v1alpha2.StarterProject) (returnedErr error) { - var errString string for _, starterProject := range starterProjects { var gitSource v1alpha2.GitLikeProjectSource if starterProject.Git != nil { gitSource = starterProject.Git.GitLikeProjectSource - } else if starterProject.Github != nil { - gitSource = starterProject.Github.GitLikeProjectSource } 
else { continue } - switch len(gitSource.Remotes) { - case 0: - errString += fmt.Sprintf("\nstarterProject %s should have at least one remote", starterProject.Name) - case 1: - if gitSource.CheckoutFrom != nil && gitSource.CheckoutFrom.Remote != "" { - err := validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, starterProject.Name) - if err != nil { - errString += fmt.Sprintf("\n%s", err.Error()) - } - } - default: // len(gitSource.Remotes) >= 2 - errString += fmt.Sprintf("\nstarterProject %s should have one remote only", starterProject.Name) + if starterProjectErr := validateSingleRemoteGitSrc("starterProject", starterProject.Name, gitSource); starterProjectErr != nil { + newErr := resolveErrorMessageWithImportAttributes(starterProjectErr, starterProject.Attributes) + returnedErr = multierror.Append(returnedErr, newErr) } } - var err error - if len(errString) > 0 { - err = fmt.Errorf("error validating starter projects:%s", errString) - } - - return err + return returnedErr } // ValidateProjects checks if the project has more than one remote configured then a checkout // remote is mandatory and if the checkout remote matches the renote configured -func ValidateProjects(projects []v1alpha2.Project) error { +func ValidateProjects(projects []v1alpha2.Project) (returnedErr error) { - var errString string for _, project := range projects { var gitSource v1alpha2.GitLikeProjectSource if project.Git != nil { gitSource = project.Git.GitLikeProjectSource - } else if project.Github != nil { - gitSource = project.Github.GitLikeProjectSource } else { continue } - switch len(gitSource.Remotes) { case 0: - errString += fmt.Sprintf("\nprojects %s should have at least one remote", project.Name) + + newErr := resolveErrorMessageWithImportAttributes(&MissingProjectRemoteError{projectName: project.Name}, project.Attributes) + returnedErr = multierror.Append(returnedErr, newErr) case 1: if gitSource.CheckoutFrom != nil && gitSource.CheckoutFrom.Remote != "" { - if err 
:= validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, project.Name); err != nil { - errString += fmt.Sprintf("\n%s", err.Error()) + if err := validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, "project", project.Name); err != nil { + newErr := resolveErrorMessageWithImportAttributes(err, project.Attributes) + returnedErr = multierror.Append(returnedErr, newErr) } } default: // len(gitSource.Remotes) >= 2 if gitSource.CheckoutFrom == nil || gitSource.CheckoutFrom.Remote == "" { - errString += fmt.Sprintf("\nproject %s has more than one remote defined, but has no checkoutfrom remote defined", project.Name) + + newErr := resolveErrorMessageWithImportAttributes(&MissingProjectCheckoutFromRemoteError{projectName: project.Name}, project.Attributes) + returnedErr = multierror.Append(returnedErr, newErr) continue } - if err := validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, project.Name); err != nil { - errString += fmt.Sprintf("\n%s", err.Error()) + if err := validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, "project", project.Name); err != nil { + newErr := resolveErrorMessageWithImportAttributes(err, project.Attributes) + returnedErr = multierror.Append(returnedErr, newErr) } } } - var err error - if len(errString) > 0 { - err = fmt.Errorf("error validating projects:%s", errString) - } - - return err + return returnedErr } // validateRemoteMap checks if the checkout remote is present in the project remote map -func validateRemoteMap(remotes map[string]string, checkoutRemote, projectName string) error { +func validateRemoteMap(remotes map[string]string, checkoutRemote, objectType, objectName string) error { if _, ok := remotes[checkoutRemote]; !ok { - return fmt.Errorf("unable to find the checkout remote %s in the remotes for project %s", checkoutRemote, projectName) + + return &InvalidProjectCheckoutRemoteError{objectName: objectName, objectType: objectType, checkoutRemote: checkoutRemote} } return 
nil } + +// validateSingleRemoteGitSrc validates a git src for a single remote only +func validateSingleRemoteGitSrc(objectType, objectName string, gitSource v1alpha2.GitLikeProjectSource) (err error) { + switch len(gitSource.Remotes) { + case 0: + err = &MissingRemoteError{objectType: objectType, objectName: objectName} + case 1: + if gitSource.CheckoutFrom != nil && gitSource.CheckoutFrom.Remote != "" { + err = validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, objectType, objectName) + } + default: // len(gitSource.Remotes) >= 2 + err = &MultipleRemoteError{objectType: objectType, objectName: objectName} + } + + return err +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/utils.go b/vendor/github.com/devfile/api/v2/pkg/validation/utils.go index e8de8ad383f..6f0ec12df36 100644 --- a/vendor/github.com/devfile/api/v2/pkg/validation/utils.go +++ b/vendor/github.com/devfile/api/v2/pkg/validation/utils.go @@ -7,6 +7,17 @@ import ( "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" ) +// attribute keys for imported and overridden elements +// the value of those keys is the resource information +const ( + // attribute key of the imported element resource information + ImportSourceAttribute = "api.devfile.io/imported-from" + // attribute key of the parent overridden element resource information + ParentOverrideAttribute = "api.devfile.io/parent-override-from" + // attribute key of the plugin overridden element resource information + PluginOverrideAttribute = "api.devfile.io/plugin-override-from" +) + // getCommandsMap iterates through the commands and returns a map of command func getCommandsMap(commands []v1alpha2.Command) map[string]v1alpha2.Command { commandMap := make(map[string]v1alpha2.Command, len(commands)) diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md b/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md index d1d850dbf65..e72d47bcfba 100644 --- 
a/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md +++ b/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md @@ -23,9 +23,9 @@ Since network is shared in the same pod, endpoint ports should be unique across - Should not reference itself via a subcommand - Should not indirectly reference itself via a subcommand which is a composite command - Should reference a valid devfile command -3. exec and apply command should: map to a valid container component -4. vscodeLaunch & vscodeTask: URI needs to be in valid URI format -5. `{build, run, test, debug}`, each kind of group can only have one default command associated with it. If there are multiple commands of the same kind without a default, a warning will be displayed. +3. exec command should: map to a valid container component +4. apply command should: map to a valid container/kubernetes/openshift/image component +5. `{build, run, test, debug, deploy}`, each kind of group can only have one default command associated with it. If there are multiple commands of the same kind without a default, a warning will be displayed. ### Components: Common rules for all components types: @@ -42,6 +42,9 @@ Common rules for all components types: #### Kubernetes & Openshift component - URI needs to be in valid URI format +#### Image component +- A Dockerfile Image component's git source cannot have more than one remote defined. If checkout remote is mentioned, validate it against the remote configured map + ### Events: 1. 
preStart and postStop events can only be Apply commands diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/errors.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/errors.go new file mode 100644 index 00000000000..572eefaac49 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/errors.go @@ -0,0 +1,31 @@ +package variables + +import ( + "fmt" + "sort" + "strings" +) + +// InvalidKeysError returns an error for the invalid keys +type InvalidKeysError struct { + Keys []string +} + +func (e *InvalidKeysError) Error() string { + return fmt.Sprintf("invalid variable references - %s", strings.Join(e.Keys, ",")) +} + +// newInvalidKeysError processes the invalid key set and returns an InvalidKeysError if present +func newInvalidKeysError(keySet map[string]bool) error { + var invalidKeysArr []string + for key := range keySet { + invalidKeysArr = append(invalidKeysArr, key) + } + + if len(invalidKeysArr) > 0 { + sort.Strings(invalidKeysArr) + return &InvalidKeysError{Keys: invalidKeysArr} + } + + return nil +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/utils.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/utils.go new file mode 100644 index 00000000000..be047ee2589 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/utils.go @@ -0,0 +1,10 @@ +package variables + +// checkForInvalidError checks for InvalidKeysError and stores the key in the map +func checkForInvalidError(invalidKeys map[string]bool, err error) { + if verr, ok := err.(*InvalidKeysError); ok { + for _, key := range verr.Keys { + invalidKeys[key] = true + } + } +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables.go new file mode 100644 index 00000000000..0abba8f8169 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables.go @@ -0,0 +1,69 @@ +package 
variables + +import ( + "regexp" + "strings" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" +) + +// example of the regex: {{variable}} / {{ variable }} +var globalVariableRegex = regexp.MustCompile(`\{\{\s*(.*?)\s*\}\}`) + +// VariableWarning stores the invalid variable references for each devfile object +type VariableWarning struct { + // Commands stores a map of command ids to the invalid variable references + Commands map[string][]string + + // Components stores a map of component names to the invalid variable references + Components map[string][]string + + // Projects stores a map of project names to the invalid variable references + Projects map[string][]string + + // StarterProjects stores a map of starter project names to the invalid variable references + StarterProjects map[string][]string +} + +// ValidateAndReplaceGlobalVariable validates the workspace template spec data for global variable references and replaces them with the variable value +func ValidateAndReplaceGlobalVariable(workspaceTemplateSpec *v1alpha2.DevWorkspaceTemplateSpec) VariableWarning { + + var variableWarning VariableWarning + + if workspaceTemplateSpec != nil { + // Validate the components and replace for global variable + variableWarning.Components = ValidateAndReplaceForComponents(workspaceTemplateSpec.Variables, workspaceTemplateSpec.Components) + + // Validate the commands and replace for global variable + variableWarning.Commands = ValidateAndReplaceForCommands(workspaceTemplateSpec.Variables, workspaceTemplateSpec.Commands) + + // Validate the projects and replace for global variable + variableWarning.Projects = ValidateAndReplaceForProjects(workspaceTemplateSpec.Variables, workspaceTemplateSpec.Projects) + + // Validate the starter projects and replace for global variable + variableWarning.StarterProjects = ValidateAndReplaceForStarterProjects(workspaceTemplateSpec.Variables, workspaceTemplateSpec.StarterProjects) + } + + return variableWarning +} + +// 
validateAndReplaceDataWithVariable validates the string for a global variable and replaces it. An error +// is returned if the string references an invalid global variable key +func validateAndReplaceDataWithVariable(val string, variables map[string]string) (string, error) { + matches := globalVariableRegex.FindAllStringSubmatch(val, -1) + var invalidKeys []string + for _, match := range matches { + varValue, ok := variables[match[1]] + if !ok { + invalidKeys = append(invalidKeys, match[1]) + } else { + val = strings.Replace(val, match[0], varValue, -1) + } + } + + if len(invalidKeys) > 0 { + return val, &InvalidKeysError{Keys: invalidKeys} + } + + return val, nil +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_command.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_command.go new file mode 100644 index 00000000000..eb51b230a14 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_command.go @@ -0,0 +1,111 @@ +package variables + +import ( + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" +) + +// ValidateAndReplaceForCommands validates the commands data for global variable references and replaces them with the variable value. +// Returns a map of command ids and invalid variable references if present. 
+func ValidateAndReplaceForCommands(variables map[string]string, commands []v1alpha2.Command) map[string][]string { + + commandsWarningMap := make(map[string][]string) + + for i := range commands { + var err error + + // Validate various command types + switch { + case commands[i].Exec != nil: + if err = validateAndReplaceForExecCommand(variables, commands[i].Exec); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + commandsWarningMap[commands[i].Id] = verr.Keys + } + } + case commands[i].Composite != nil: + if err = validateAndReplaceForCompositeCommand(variables, commands[i].Composite); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + commandsWarningMap[commands[i].Id] = verr.Keys + } + } + case commands[i].Apply != nil: + if err = validateAndReplaceForApplyCommand(variables, commands[i].Apply); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + commandsWarningMap[commands[i].Id] = verr.Keys + } + } + } + } + + return commandsWarningMap +} + +// validateAndReplaceForExecCommand validates the exec command data for global variable references and replaces them with the variable value +func validateAndReplaceForExecCommand(variables map[string]string, exec *v1alpha2.ExecCommand) error { + + if exec == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate exec command line + if exec.CommandLine, err = validateAndReplaceDataWithVariable(exec.CommandLine, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate exec working dir + if exec.WorkingDir, err = validateAndReplaceDataWithVariable(exec.WorkingDir, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate exec label + if exec.Label, err = validateAndReplaceDataWithVariable(exec.Label, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate exec env + if len(exec.Env) > 0 { + if err = validateAndReplaceForEnv(variables, exec.Env); err != nil { + 
checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForCompositeCommand validates the composite command data for global variable references and replaces them with the variable value +func validateAndReplaceForCompositeCommand(variables map[string]string, composite *v1alpha2.CompositeCommand) error { + + if composite == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate composite label + if composite.Label, err = validateAndReplaceDataWithVariable(composite.Label, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForApplyCommand validates the apply command data for global variable references and replaces them with the variable value +func validateAndReplaceForApplyCommand(variables map[string]string, apply *v1alpha2.ApplyCommand) error { + + if apply == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate apply label + if apply.Label, err = validateAndReplaceDataWithVariable(apply.Label, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + return newInvalidKeysError(invalidKeys) +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_component.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_component.go new file mode 100644 index 00000000000..a42aa2f171d --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_component.go @@ -0,0 +1,293 @@ +package variables + +import ( + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" +) + +// ValidateAndReplaceForComponents validates the components data for global variable references and replaces them with the variable value +// Returns a map of component names and invalid variable references if present. 
+func ValidateAndReplaceForComponents(variables map[string]string, components []v1alpha2.Component) map[string][]string { + + componentsWarningMap := make(map[string][]string) + + for i := range components { + var err error + + // Validate various component types + switch { + case components[i].Container != nil: + if err = validateAndReplaceForContainerComponent(variables, components[i].Container); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + componentsWarningMap[components[i].Name] = verr.Keys + } + } + case components[i].Kubernetes != nil: + if err = validateAndReplaceForKubernetesComponent(variables, components[i].Kubernetes); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + componentsWarningMap[components[i].Name] = verr.Keys + } + } + case components[i].Openshift != nil: + if err = validateAndReplaceForOpenShiftComponent(variables, components[i].Openshift); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + componentsWarningMap[components[i].Name] = verr.Keys + } + } + case components[i].Image != nil: + if err = validateAndReplaceForImageComponent(variables, components[i].Image); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + componentsWarningMap[components[i].Name] = verr.Keys + } + } + case components[i].Volume != nil: + if err = validateAndReplaceForVolumeComponent(variables, components[i].Volume); err != nil { + if verr, ok := err.(*InvalidKeysError); ok { + componentsWarningMap[components[i].Name] = verr.Keys + } + } + } + } + + return componentsWarningMap +} + +// validateAndReplaceForContainerComponent validates the container component data for global variable references and replaces them with the variable value +func validateAndReplaceForContainerComponent(variables map[string]string, container *v1alpha2.ContainerComponent) error { + + if container == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate container image + if container.Image, err = 
validateAndReplaceDataWithVariable(container.Image, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate container commands + for i := range container.Command { + if container.Command[i], err = validateAndReplaceDataWithVariable(container.Command[i], variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + // Validate container args + for i := range container.Args { + if container.Args[i], err = validateAndReplaceDataWithVariable(container.Args[i], variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + // Validate memory limit + if container.MemoryLimit, err = validateAndReplaceDataWithVariable(container.MemoryLimit, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate memory request + if container.MemoryRequest, err = validateAndReplaceDataWithVariable(container.MemoryRequest, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate source mapping + if container.SourceMapping, err = validateAndReplaceDataWithVariable(container.SourceMapping, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate container env + if len(container.Env) > 0 { + if err = validateAndReplaceForEnv(variables, container.Env); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + // Validate container volume mounts + for i := range container.VolumeMounts { + if container.VolumeMounts[i].Path, err = validateAndReplaceDataWithVariable(container.VolumeMounts[i].Path, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + // Validate container endpoints + if len(container.Endpoints) > 0 { + if err = validateAndReplaceForEndpoint(variables, container.Endpoints); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForEnv validates the env data for global variable references and replaces them with the variable value 
+func validateAndReplaceForEnv(variables map[string]string, env []v1alpha2.EnvVar) error { + + invalidKeys := make(map[string]bool) + + for i := range env { + var err error + + // Validate env name + if env[i].Name, err = validateAndReplaceDataWithVariable(env[i].Name, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate env value + if env[i].Value, err = validateAndReplaceDataWithVariable(env[i].Value, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForKubernetesComponent validates the kubernetes component data for global variable references and replaces them with the variable value +func validateAndReplaceForKubernetesComponent(variables map[string]string, kubernetes *v1alpha2.KubernetesComponent) error { + + if kubernetes == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate kubernetes uri + if kubernetes.Uri, err = validateAndReplaceDataWithVariable(kubernetes.Uri, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate kubernetes inlined + if kubernetes.Inlined, err = validateAndReplaceDataWithVariable(kubernetes.Inlined, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate kubernetes endpoints + if len(kubernetes.Endpoints) > 0 { + if err = validateAndReplaceForEndpoint(variables, kubernetes.Endpoints); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForOpenShiftComponent validates the openshift component data for global variable references and replaces them with the variable value +func validateAndReplaceForOpenShiftComponent(variables map[string]string, openshift *v1alpha2.OpenshiftComponent) error { + + if openshift == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate openshift uri + if 
openshift.Uri, err = validateAndReplaceDataWithVariable(openshift.Uri, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate openshift inlined + if openshift.Inlined, err = validateAndReplaceDataWithVariable(openshift.Inlined, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate openshift endpoints + if len(openshift.Endpoints) > 0 { + if err = validateAndReplaceForEndpoint(variables, openshift.Endpoints); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForImageComponent validates the image component data for global variable references and replaces them with the variable value +func validateAndReplaceForImageComponent(variables map[string]string, image *v1alpha2.ImageComponent) error { + + if image == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate image's image name + if image.ImageName, err = validateAndReplaceDataWithVariable(image.ImageName, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + if err = validateAndReplaceForDockerfileImageComponent(variables, image.Dockerfile); err != nil { + checkForInvalidError(invalidKeys, err) + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForDockerfileImageComponent validates the dockerfile image component data for global variable references and replaces them with the variable value +func validateAndReplaceForDockerfileImageComponent(variables map[string]string, dockerfileImage *v1alpha2.DockerfileImage) error { + + if dockerfileImage == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + switch { + case dockerfileImage.Uri != "": + // Validate dockerfile image URI + if dockerfileImage.Uri, err = validateAndReplaceDataWithVariable(dockerfileImage.Uri, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + case dockerfileImage.Git != 
nil: + // Validate dockerfile Git location + if dockerfileImage.Git.FileLocation, err = validateAndReplaceDataWithVariable(dockerfileImage.Git.FileLocation, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + gitProject := &dockerfileImage.Git.GitLikeProjectSource + if err = validateAndReplaceForGitProjectSource(variables, gitProject); err != nil { + checkForInvalidError(invalidKeys, err) + } + case dockerfileImage.DevfileRegistry != nil: + // Validate dockerfile devfile registry src + if dockerfileImage.DevfileRegistry.Id, err = validateAndReplaceDataWithVariable(dockerfileImage.DevfileRegistry.Id, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + if dockerfileImage.DevfileRegistry.RegistryUrl, err = validateAndReplaceDataWithVariable(dockerfileImage.DevfileRegistry.RegistryUrl, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + // Validate dockerfile image's build context + if dockerfileImage.BuildContext, err = validateAndReplaceDataWithVariable(dockerfileImage.BuildContext, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate dockerfile image's args + for i := range dockerfileImage.Args { + if dockerfileImage.Args[i], err = validateAndReplaceDataWithVariable(dockerfileImage.Args[i], variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// validateAndReplaceForVolumeComponent validates the volume component data for global variable references and replaces them with the variable value +func validateAndReplaceForVolumeComponent(variables map[string]string, volume *v1alpha2.VolumeComponent) error { + + if volume == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + // Validate volume size + if volume.Size, err = validateAndReplaceDataWithVariable(volume.Size, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + return 
newInvalidKeysError(invalidKeys) +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_endpoint.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_endpoint.go new file mode 100644 index 00000000000..17a1e2ed0bd --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_endpoint.go @@ -0,0 +1,22 @@ +package variables + +import ( + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" +) + +// validateAndReplaceForEndpoint validates the endpoint data for global variable references and replaces them with the variable value +func validateAndReplaceForEndpoint(variables map[string]string, endpoints []v1alpha2.Endpoint) error { + + invalidKeys := make(map[string]bool) + + for i := range endpoints { + var err error + + // Validate endpoint path + if endpoints[i].Path, err = validateAndReplaceDataWithVariable(endpoints[i].Path, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} diff --git a/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_project.go b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_project.go new file mode 100644 index 00000000000..15afe461696 --- /dev/null +++ b/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_project.go @@ -0,0 +1,138 @@ +package variables + +import ( + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" +) + +// ValidateAndReplaceForProjects validates the projects data for global variable references and replaces them with the variable value. +// Returns a map of project names and invalid variable references if present. 
+func ValidateAndReplaceForProjects(variables map[string]string, projects []v1alpha2.Project) map[string][]string { + + projectsWarningMap := make(map[string][]string) + + for i := range projects { + var err error + + invalidKeys := make(map[string]bool) + + // Validate project clonepath + if projects[i].ClonePath, err = validateAndReplaceDataWithVariable(projects[i].ClonePath, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate project source + if err = validateandReplaceForProjectSource(variables, &projects[i].ProjectSource); err != nil { + checkForInvalidError(invalidKeys, err) + } + + err = newInvalidKeysError(invalidKeys) + if verr, ok := err.(*InvalidKeysError); ok { + projectsWarningMap[projects[i].Name] = verr.Keys + } + } + + return projectsWarningMap +} + +// ValidateAndReplaceForStarterProjects validates the starter projects data for global variable references and replaces them with the variable value. +// Returns a map of starter project names and invalid variable references if present. 
+func ValidateAndReplaceForStarterProjects(variables map[string]string, starterProjects []v1alpha2.StarterProject) map[string][]string { + + starterProjectsWarningMap := make(map[string][]string) + + for i := range starterProjects { + var err error + + invalidKeys := make(map[string]bool) + + // Validate starter project description + if starterProjects[i].Description, err = validateAndReplaceDataWithVariable(starterProjects[i].Description, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate starter project sub dir + if starterProjects[i].SubDir, err = validateAndReplaceDataWithVariable(starterProjects[i].SubDir, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // Validate starter project source + if err = validateandReplaceForProjectSource(variables, &starterProjects[i].ProjectSource); err != nil { + checkForInvalidError(invalidKeys, err) + } + + err = newInvalidKeysError(invalidKeys) + if verr, ok := err.(*InvalidKeysError); ok { + starterProjectsWarningMap[starterProjects[i].Name] = verr.Keys + } + } + + return starterProjectsWarningMap +} + +// validateandReplaceForProjectSource validates a project source location for global variable references and replaces them with the variable value +func validateandReplaceForProjectSource(variables map[string]string, projectSource *v1alpha2.ProjectSource) error { + + if projectSource == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + switch { + case projectSource.Zip != nil: + if projectSource.Zip.Location, err = validateAndReplaceDataWithVariable(projectSource.Zip.Location, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + case projectSource.Git != nil: + gitProject := &projectSource.Git.GitLikeProjectSource + + if err = validateAndReplaceForGitProjectSource(variables, gitProject); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + return newInvalidKeysError(invalidKeys) +} + +// 
validateAndReplaceForGitProjectSource validates a project git src for global variable references and replaces them with the variable value +func validateAndReplaceForGitProjectSource(variables map[string]string, gitProject *v1alpha2.GitLikeProjectSource) error { + + if gitProject == nil { + return nil + } + + var err error + invalidKeys := make(map[string]bool) + + if gitProject.CheckoutFrom != nil { + // validate git checkout revision + if gitProject.CheckoutFrom.Revision, err = validateAndReplaceDataWithVariable(gitProject.CheckoutFrom.Revision, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // // validate git checkout remote + if gitProject.CheckoutFrom.Remote, err = validateAndReplaceDataWithVariable(gitProject.CheckoutFrom.Remote, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + } + + // validate git remotes + for k := range gitProject.Remotes { + // validate remote map value + if gitProject.Remotes[k], err = validateAndReplaceDataWithVariable(gitProject.Remotes[k], variables); err != nil { + checkForInvalidError(invalidKeys, err) + } + + // validate remote map key + var updatedKey string + if updatedKey, err = validateAndReplaceDataWithVariable(k, variables); err != nil { + checkForInvalidError(invalidKeys, err) + } else if updatedKey != k { + gitProject.Remotes[updatedKey] = gitProject.Remotes[k] + delete(gitProject.Remotes, k) + } + } + + return newInvalidKeysError(invalidKeys) +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/generator/generators.go b/vendor/github.com/devfile/library/pkg/devfile/generator/generators.go index 87878dec58b..913c4fdf05b 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/generator/generators.go +++ b/vendor/github.com/devfile/library/pkg/devfile/generator/generators.go @@ -1,17 +1,21 @@ package generator import ( + "fmt" + + v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/library/pkg/devfile/parser" + 
"github.com/devfile/library/pkg/devfile/parser/data/v2/common" + "github.com/devfile/library/pkg/util" buildv1 "github.com/openshift/api/build/v1" imagev1 "github.com/openshift/api/image/v1" routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/devfile/library/pkg/devfile/parser" - "github.com/devfile/library/pkg/devfile/parser/data/v2/common" ) const ( @@ -26,6 +30,8 @@ const ( deploymentKind = "Deployment" deploymentAPIVersion = "apps/v1" + + containerNameMaxLen = 55 ) // GetTypeMeta gets a type meta of the specified kind and version @@ -49,45 +55,99 @@ func GetObjectMeta(name, namespace string, labels, annotations map[string]string return objectMeta } -// GetContainers iterates through the devfile components and returns a slice of the corresponding containers +// GetContainers iterates through all container components, filters out init containers and returns corresponding containers func GetContainers(devfileObj parser.DevfileObj, options common.DevfileOptions) ([]corev1.Container, error) { - var containers []corev1.Container - containerComponents, err := devfileObj.Data.GetDevfileContainerComponents(options) + allContainers, err := getAllContainers(devfileObj, options) if err != nil { return nil, err } - for _, comp := range containerComponents { - envVars := convertEnvs(comp.Container.Env) - resourceReqs := getResourceReqs(comp) - ports := convertPorts(comp.Container.Endpoints) - containerParams := containerParams{ - Name: comp.Name, - Image: comp.Container.Image, - IsPrivileged: false, - Command: comp.Container.Command, - Args: comp.Container.Args, - EnvVars: envVars, - ResourceReqs: resourceReqs, - Ports: ports, + + // filter out containers for preStart and postStop events + preStartEvents := devfileObj.Data.GetEvents().PreStart 
+ postStopEvents := devfileObj.Data.GetEvents().PostStop + if len(preStartEvents) > 0 || len(postStopEvents) > 0 { + var eventCommands []string + commands, err := devfileObj.Data.GetCommands(common.DevfileOptions{}) + if err != nil { + return nil, err } - container := getContainer(containerParams) - // If `mountSources: true` was set PROJECTS_ROOT & PROJECT_SOURCE env - if comp.Container.MountSources == nil || *comp.Container.MountSources { - syncRootFolder := addSyncRootFolder(container, comp.Container.SourceMapping) + commandsMap := common.GetCommandsMap(commands) - projects, err := devfileObj.Data.GetProjects(common.DevfileOptions{}) - if err != nil { - return nil, err + for _, event := range preStartEvents { + eventSubCommands := common.GetCommandsFromEvent(commandsMap, event) + eventCommands = append(eventCommands, eventSubCommands...) + } + + for _, event := range postStopEvents { + eventSubCommands := common.GetCommandsFromEvent(commandsMap, event) + eventCommands = append(eventCommands, eventSubCommands...) + } + + for _, commandName := range eventCommands { + command, _ := commandsMap[commandName] + component := common.GetApplyComponent(command) + + // Get the container info for the given component + for i, container := range allContainers { + if container.Name == component { + allContainers = append(allContainers[:i], allContainers[i+1:]...) 
+ } } - err = addSyncFolder(container, syncRootFolder, projects) - if err != nil { - return nil, err + } + } + + return allContainers, nil + +} + +// GetInitContainers gets the init container for every preStart devfile event +func GetInitContainers(devfileObj parser.DevfileObj) ([]corev1.Container, error) { + containers, err := getAllContainers(devfileObj, common.DevfileOptions{}) + if err != nil { + return nil, err + } + preStartEvents := devfileObj.Data.GetEvents().PreStart + var initContainers []corev1.Container + if len(preStartEvents) > 0 { + var eventCommands []string + commands, err := devfileObj.Data.GetCommands(common.DevfileOptions{}) + if err != nil { + return nil, err + } + + commandsMap := common.GetCommandsMap(commands) + + for _, event := range preStartEvents { + eventSubCommands := common.GetCommandsFromEvent(commandsMap, event) + eventCommands = append(eventCommands, eventSubCommands...) + } + + for i, commandName := range eventCommands { + command, _ := commandsMap[commandName] + component := common.GetApplyComponent(command) + + // Get the container info for the given component + for _, container := range containers { + if container.Name == component { + // Override the init container name since there cannot be two containers with the same + // name in a pod. This applies to pod containers and pod init containers. The convention + // for init container name here is, containername-eventname- + // If there are two events referencing the same devfile component, then we will have + // tools-event1-1 & tools-event2-3, for example. 
And if in the edge case, the same command is + // executed twice by preStart events, then we will have tools-event1-1 & tools-event1-2 + initContainerName := fmt.Sprintf("%s-%s", container.Name, commandName) + initContainerName = util.TruncateString(initContainerName, containerNameMaxLen) + initContainerName = fmt.Sprintf("%s-%d", initContainerName, i+1) + container.Name = initContainerName + + initContainers = append(initContainers, container) + } } } - containers = append(containers, *container) } - return containers, nil + + return initContainers, nil } // DeploymentParams is a struct that contains the required data to create a deployment object @@ -189,6 +249,19 @@ func GetIngress(ingressParams IngressParams) *extensionsv1.Ingress { return ingress } +// GetNetworkingV1Ingress gets a networking v1 ingress +func GetNetworkingV1Ingress(ingressParams IngressParams) *networkingv1.Ingress { + ingressSpec := getNetworkingV1IngressSpec(ingressParams.IngressSpecParams) + + ingress := &networkingv1.Ingress{ + TypeMeta: ingressParams.TypeMeta, + ObjectMeta: ingressParams.ObjectMeta, + Spec: *ingressSpec, + } + + return ingress +} + // RouteParams is a struct that contains the required data to create a route object type RouteParams struct { TypeMeta metav1.TypeMeta @@ -284,3 +357,79 @@ func GetImageStream(imageStreamParams ImageStreamParams) imagev1.ImageStream { } return imageStream } + +// VolumeInfo is a struct to hold the pvc name and the volume name to create a volume. +type VolumeInfo struct { + PVCName string + VolumeName string +} + +// VolumeParams is a struct that contains the required data to create Kubernetes Volumes and mount Volumes in Containers +type VolumeParams struct { + // Containers is a list of containers that needs to be updated for the volume mounts + Containers []corev1.Container + + // VolumeNameToVolumeInfo is a map of the devfile volume name to the volume info containing the pvc name and the volume name. 
+ VolumeNameToVolumeInfo map[string]VolumeInfo +} + +// GetVolumesAndVolumeMounts gets the PVC volumes and updates the containers with the volume mounts. +func GetVolumesAndVolumeMounts(devfileObj parser.DevfileObj, volumeParams VolumeParams, options common.DevfileOptions) ([]corev1.Volume, error) { + + options.ComponentOptions = common.ComponentOptions{ + ComponentType: v1.ContainerComponentType, + } + containerComponents, err := devfileObj.Data.GetComponents(options) + if err != nil { + return nil, err + } + + options.ComponentOptions = common.ComponentOptions{ + ComponentType: v1.VolumeComponentType, + } + volumeComponent, err := devfileObj.Data.GetComponents(options) + if err != nil { + return nil, err + } + + var pvcVols []corev1.Volume + for volName, volInfo := range volumeParams.VolumeNameToVolumeInfo { + emptyDirVolume := false + for _, volumeComp := range volumeComponent { + if volumeComp.Name == volName && *volumeComp.Volume.Ephemeral { + emptyDirVolume = true + break + } + } + + // if `ephemeral=true`, a volume with emptyDir should be created + if emptyDirVolume { + pvcVols = append(pvcVols, getEmptyDirVol(volInfo.VolumeName)) + } else { + pvcVols = append(pvcVols, getPVC(volInfo.VolumeName, volInfo.PVCName)) + } + + // containerNameToMountPaths is a map of the Devfile container name to their Devfile Volume Mount Paths for a given Volume Name + containerNameToMountPaths := make(map[string][]string) + for _, containerComp := range containerComponents { + for _, volumeMount := range containerComp.Container.VolumeMounts { + if volName == volumeMount.Name { + containerNameToMountPaths[containerComp.Name] = append(containerNameToMountPaths[containerComp.Name], GetVolumeMountPath(volumeMount)) + } + } + } + + addVolumeMountToContainers(volumeParams.Containers, volInfo.VolumeName, containerNameToMountPaths) + } + return pvcVols, nil +} + +// GetVolumeMountPath gets the volume mount's path. 
+func GetVolumeMountPath(volumeMount v1.VolumeMount) string { + // if there is no volume mount path, default to volume mount name as per devfile schema + if volumeMount.Path == "" { + volumeMount.Path = "/" + volumeMount.Name + } + + return volumeMount.Path +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/generator/utils.go b/vendor/github.com/devfile/library/pkg/devfile/generator/utils.go index 1bb9ec2dae8..452cb052e1b 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/generator/utils.go +++ b/vendor/github.com/devfile/library/pkg/devfile/generator/utils.go @@ -2,7 +2,9 @@ package generator import ( "fmt" + "github.com/hashicorp/go-multierror" "path/filepath" + "reflect" "strings" v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" @@ -13,6 +15,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -57,17 +60,56 @@ func convertPorts(endpoints []v1.Endpoint) []corev1.ContainerPort { } // getResourceReqs creates a kubernetes ResourceRequirements object based on resource requirements set in the devfile -func getResourceReqs(comp v1.Component) corev1.ResourceRequirements { +func getResourceReqs(comp v1.Component) (corev1.ResourceRequirements, error) { reqs := corev1.ResourceRequirements{} limits := make(corev1.ResourceList) - if comp.Container != nil && comp.Container.MemoryLimit != "" { - memoryLimit, err := resource.ParseQuantity(comp.Container.MemoryLimit) - if err == nil { - limits[corev1.ResourceMemory] = memoryLimit + requests := make(corev1.ResourceList) + var returnedErr error + if comp.Container != nil { + if comp.Container.MemoryLimit != "" { + memoryLimit, err := resource.ParseQuantity(comp.Container.MemoryLimit) + if err != nil { + errMsg := fmt.Errorf("error parsing memoryLimit requirement for component 
%s: %v", comp.Name, err.Error()) + returnedErr = multierror.Append(returnedErr, errMsg) + } else { + limits[corev1.ResourceMemory] = memoryLimit + } + } + if comp.Container.CpuLimit != "" { + cpuLimit, err := resource.ParseQuantity(comp.Container.CpuLimit) + if err != nil { + errMsg := fmt.Errorf("error parsing cpuLimit requirement for component %s: %v", comp.Name, err.Error()) + returnedErr = multierror.Append(returnedErr, errMsg) + } else { + limits[corev1.ResourceCPU] = cpuLimit + } + } + if comp.Container.MemoryRequest != "" { + memoryRequest, err := resource.ParseQuantity(comp.Container.MemoryRequest) + if err != nil { + errMsg := fmt.Errorf("error parsing memoryRequest requirement for component %s: %v", comp.Name, err.Error()) + returnedErr = multierror.Append(returnedErr, errMsg) + } else { + requests[corev1.ResourceMemory] = memoryRequest + } + } + if comp.Container.CpuRequest != "" { + cpuRequest, err := resource.ParseQuantity(comp.Container.CpuRequest) + if err != nil { + errMsg := fmt.Errorf("error parsing cpuRequest requirement for component %s: %v", comp.Name, err.Error()) + returnedErr = multierror.Append(returnedErr, errMsg) + } else { + requests[corev1.ResourceCPU] = cpuRequest + } + } + if !reflect.DeepEqual(limits, corev1.ResourceList{}) { + reqs.Limits = limits + } + if !reflect.DeepEqual(requests, corev1.ResourceList{}) { + reqs.Requests = requests } - reqs.Limits = limits } - return reqs + return reqs, returnedErr } // addSyncRootFolder adds the sync root folder to the container env @@ -254,7 +296,10 @@ func getServiceSpec(devfileObj parser.DevfileObj, selectorLabels map[string]stri // exposure level: public > internal > none func getPortExposure(devfileObj parser.DevfileObj, options common.DevfileOptions) (map[int]v1.EndpointExposure, error) { portExposureMap := make(map[int]v1.EndpointExposure) - containerComponents, err := devfileObj.Data.GetDevfileContainerComponents(options) + options.ComponentOptions = common.ComponentOptions{ + 
ComponentType: v1.ContainerComponentType, + } + containerComponents, err := devfileObj.Data.GetComponents(options) if err != nil { return portExposureMap, err } @@ -332,6 +377,54 @@ func getIngressSpec(ingressSpecParams IngressSpecParams) *extensionsv1.IngressSp return ingressSpec } +// getNetworkingV1IngressSpec gets a networking v1 ingress spec +func getNetworkingV1IngressSpec(ingressSpecParams IngressSpecParams) *networkingv1.IngressSpec { + path := "/" + pathTypeImplementationSpecific := networkingv1.PathTypeImplementationSpecific + if ingressSpecParams.Path != "" { + path = ingressSpecParams.Path + } + ingressSpec := &networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + { + Host: ingressSpecParams.IngressDomain, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: path, + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: ingressSpecParams.ServiceName, + Port: networkingv1.ServiceBackendPort{ + Number: ingressSpecParams.PortNumber.IntVal, + }, + }, + }, + //Field is required to be set based on attempt to create the ingress + PathType: &pathTypeImplementationSpecific, + }, + }, + }, + }, + }, + }, + } + secretNameLength := len(ingressSpecParams.TLSSecretName) + if secretNameLength != 0 { + ingressSpec.TLS = []networkingv1.IngressTLS{ + { + Hosts: []string{ + ingressSpecParams.IngressDomain, + }, + SecretName: ingressSpecParams.TLSSecretName, + }, + } + } + + return ingressSpec +} + // RouteSpecParams struct for function GenerateRouteSpec // serviceName is the name of the service for the target reference // portNumber is the target port of the ingress @@ -419,3 +512,93 @@ func getBuildConfigSpec(buildConfigSpecParams BuildConfigSpecParams) *buildv1.Bu }, } } + +// getPVC gets a pvc type volume with the given volume name and pvc name. 
+func getPVC(volumeName, pvcName string) corev1.Volume { + + return corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + } +} + +// getEmptyDirVol gets a volume with emptyDir +func getEmptyDirVol(volumeName string) corev1.Volume { + return corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } +} + +// addVolumeMountToContainers adds the Volume Mounts in containerNameToMountPaths to the containers for a given volumeName. +// containerNameToMountPaths is a map of a container name to an array of its Mount Paths. +func addVolumeMountToContainers(containers []corev1.Container, volumeName string, containerNameToMountPaths map[string][]string) { + + for containerName, mountPaths := range containerNameToMountPaths { + for i := range containers { + if containers[i].Name == containerName { + for _, mountPath := range mountPaths { + containers[i].VolumeMounts = append(containers[i].VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: mountPath, + }, + ) + } + } + } + } +} + +// getAllContainers iterates through the devfile components and returns all container components +func getAllContainers(devfileObj parser.DevfileObj, options common.DevfileOptions) ([]corev1.Container, error) { + var containers []corev1.Container + + options.ComponentOptions = common.ComponentOptions{ + ComponentType: v1.ContainerComponentType, + } + containerComponents, err := devfileObj.Data.GetComponents(options) + if err != nil { + return nil, err + } + for _, comp := range containerComponents { + envVars := convertEnvs(comp.Container.Env) + resourceReqs, err := getResourceReqs(comp) + if err != nil { + return containers, err + } + ports := convertPorts(comp.Container.Endpoints) + containerParams := containerParams{ + Name: comp.Name, + Image: comp.Container.Image, + IsPrivileged: false, + Command: 
comp.Container.Command, + Args: comp.Container.Args, + EnvVars: envVars, + ResourceReqs: resourceReqs, + Ports: ports, + } + container := getContainer(containerParams) + + // If `mountSources: true` was set PROJECTS_ROOT & PROJECT_SOURCE env + if comp.Container.MountSources == nil || *comp.Container.MountSources { + syncRootFolder := addSyncRootFolder(container, comp.Container.SourceMapping) + + projects, err := devfileObj.Data.GetProjects(common.DevfileOptions{}) + if err != nil { + return nil, err + } + err = addSyncFolder(container, syncRootFolder, projects) + if err != nil { + return nil, err + } + } + containers = append(containers, *container) + } + return containers, nil +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parse.go b/vendor/github.com/devfile/library/pkg/devfile/parse.go index cebc9fab7a2..6eba767f78d 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parse.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parse.go @@ -1,6 +1,7 @@ package devfile import ( + "github.com/devfile/api/v2/pkg/validation/variables" "github.com/devfile/library/pkg/devfile/parser" "github.com/devfile/library/pkg/devfile/validate" ) @@ -9,6 +10,7 @@ import ( // and validates the devfile integrity with the schema // and validates the devfile data. // Creates devfile context and runtime objects. +// Deprecated, use ParseDevfileAndValidate() instead func ParseFromURLAndValidate(url string) (d parser.DevfileObj, err error) { // read and parse devfile from the given URL @@ -30,6 +32,7 @@ func ParseFromURLAndValidate(url string) (d parser.DevfileObj, err error) { // and validates the devfile integrity with the schema // and validates the devfile data. // Creates devfile context and runtime objects. 
+// Deprecated, use ParseDevfileAndValidate() instead func ParseFromDataAndValidate(data []byte) (d parser.DevfileObj, err error) { // read and parse devfile from the given bytes d, err = parser.ParseFromData(data) @@ -49,6 +52,7 @@ func ParseFromDataAndValidate(data []byte) (d parser.DevfileObj, err error) { // and validates the devfile integrity with the schema // and validates the devfile data. // Creates devfile context and runtime objects. +// Deprecated, use ParseDevfileAndValidate() instead func ParseAndValidate(path string) (d parser.DevfileObj, err error) { // read and parse devfile from given path @@ -65,3 +69,26 @@ func ParseAndValidate(path string) (d parser.DevfileObj, err error) { return d, err } + +// ParseDevfileAndValidate func parses the devfile data, validates the devfile integrity with the schema +// replaces the top-level variable keys if present and validates the devfile data. +// It returns devfile context and runtime objects, variable substitution warning if any and an error. 
+func ParseDevfileAndValidate(args parser.ParserArgs) (d parser.DevfileObj, varWarning variables.VariableWarning, err error) { + d, err = parser.ParseDevfile(args) + if err != nil { + return d, varWarning, err + } + + if d.Data.GetSchemaVersion() != "2.0.0" { + // replace the top level variable keys with their values in the devfile + varWarning = variables.ValidateAndReplaceGlobalVariable(d.Data.GetDevfileWorkspaceSpec()) + } + + // generic validation on devfile content + err = validate.ValidateDevfileData(d.Data) + if err != nil { + return d, varWarning, err + } + + return d, varWarning, err +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/configurables.go b/vendor/github.com/devfile/library/pkg/devfile/parser/configurables.go index 8bef2197e27..f25437ddf1e 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/configurables.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/configurables.go @@ -22,7 +22,8 @@ const ( // SetMetadataName set metadata name in a devfile func (d DevfileObj) SetMetadataName(name string) error { metadata := d.Data.GetMetadata() - d.Data.SetMetadata(name, metadata.Version) + metadata.Name = name + d.Data.SetMetadata(metadata) return d.WriteYamlDevfile() } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/context/apiVersion.go b/vendor/github.com/devfile/library/pkg/devfile/parser/context/apiVersion.go index 2f05a209f8b..e07d706dc9f 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/context/apiVersion.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/context/apiVersion.go @@ -3,6 +3,7 @@ package parser import ( "encoding/json" "fmt" + "strings" "github.com/devfile/library/pkg/devfile/parser/data" "github.com/pkg/errors" @@ -19,35 +20,29 @@ func (d *DevfileCtx) SetDevfileAPIVersion() error { return errors.Wrapf(err, "failed to decode devfile json") } - var apiVer string - - // Get "apiVersion" value from map for devfile V1 - apiVersion, okApi := 
r["apiVersion"] - // Get "schemaVersion" value from map for devfile V2 schemaVersion, okSchema := r["schemaVersion"] + var devfilePath string + if d.GetAbsPath() != "" { + devfilePath = d.GetAbsPath() + } else if d.GetURL() != "" { + devfilePath = d.GetURL() + } - if okApi { - apiVer = apiVersion.(string) - // apiVersion cannot be empty - if apiVer == "" { - return fmt.Errorf("apiVersion in devfile cannot be empty") - } - - } else if okSchema { - apiVer = schemaVersion.(string) + if okSchema { // SchemaVersion cannot be empty if schemaVersion.(string) == "" { - return fmt.Errorf("schemaVersion in devfile cannot be empty") + return fmt.Errorf("schemaVersion in devfile: %s cannot be empty", devfilePath) } } else { - return fmt.Errorf("apiVersion or schemaVersion not present in devfile") - + return fmt.Errorf("schemaVersion not present in devfile: %s", devfilePath) } // Successful - d.apiVersion = apiVer - klog.V(4).Infof("devfile apiVersion: '%s'", d.apiVersion) + // split by `-` and get the first substring as schema version, schemaVersion without `-` won't get affected + // e.g. 
2.2.0-latest => 2.2.0, 2.2.0 => 2.2.0 + d.apiVersion = strings.Split(schemaVersion.(string), "-")[0] + klog.V(4).Infof("devfile schemaVersion: '%s'", d.apiVersion) return nil } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/context/content.go b/vendor/github.com/devfile/library/pkg/devfile/parser/context/content.go index e9d817399d5..a4a4ddf163c 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/context/content.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/context/content.go @@ -5,9 +5,9 @@ import ( "unicode" "github.com/devfile/library/pkg/util" - "github.com/ghodss/yaml" "github.com/pkg/errors" "k8s.io/klog" + "sigs.k8s.io/yaml" ) // Every JSON document starts with "{" @@ -54,7 +54,7 @@ func (d *DevfileCtx) SetDevfileContent() error { if d.url != "" { data, err = util.DownloadFileInMemory(d.url) if err != nil { - return errors.Wrap(err, "error getting parent info from url") + return errors.Wrap(err, "error getting devfile info from url") } } else if d.absPath != "" { // Read devfile diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/context/context.go b/vendor/github.com/devfile/library/pkg/devfile/parser/context/context.go index 252a920cf39..64e3b5d4cc6 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/context/context.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/context/context.go @@ -3,14 +3,15 @@ package parser import ( "fmt" "net/url" + "os" + "path/filepath" + "strings" "github.com/devfile/library/pkg/testingutil/filesystem" "github.com/devfile/library/pkg/util" "k8s.io/klog" ) -var URIMap = make(map[string]bool) - // DevfileCtx stores context info regarding devfile type DevfileCtx struct { @@ -51,6 +52,15 @@ func NewURLDevfileCtx(url string) DevfileCtx { } } +// NewByteContentDevfileCtx set devfile content from byte data and returns a new DevfileCtx type object and error +func NewByteContentDevfileCtx(data []byte) (d DevfileCtx, err error) { + err = 
d.SetDevfileContentFromBytes(data) + if err != nil { + return DevfileCtx{}, err + } + return d, nil +} + // populateDevfile checks the API version is supported and returns the JSON schema for the given devfile API Version func (d *DevfileCtx) populateDevfile() (err error) { @@ -65,15 +75,21 @@ func (d *DevfileCtx) populateDevfile() (err error) { // Populate fills the DevfileCtx struct with relevant context info func (d *DevfileCtx) Populate() (err error) { - + if !strings.HasSuffix(d.relPath, ".yaml") { + if _, err := os.Stat(filepath.Join(d.relPath, "devfile.yaml")); os.IsNotExist(err) { + if _, err := os.Stat(filepath.Join(d.relPath, ".devfile.yaml")); os.IsNotExist(err) { + return fmt.Errorf("the provided path is not a valid yaml filepath, and devfile.yaml or .devfile.yaml not found in the provided path : %s", d.relPath) + } else { + d.relPath = filepath.Join(d.relPath, ".devfile.yaml") + } + } else { + d.relPath = filepath.Join(d.relPath, "devfile.yaml") + } + } if err := d.SetAbsPath(); err != nil { return err } klog.V(4).Infof("absolute devfile path: '%s'", d.absPath) - if URIMap[d.absPath] { - return fmt.Errorf("URI %v is recursively referenced", d.absPath) - } - URIMap[d.absPath] = true // Read and save devfile content if err := d.SetDevfileContent(); err != nil { return err @@ -83,15 +99,10 @@ func (d *DevfileCtx) Populate() (err error) { // PopulateFromURL fills the DevfileCtx struct with relevant context info func (d *DevfileCtx) PopulateFromURL() (err error) { - _, err = url.ParseRequestURI(d.url) if err != nil { return err } - if URIMap[d.url] { - return fmt.Errorf("URI %v is recursively referenced", d.url) - } - URIMap[d.url] = true // Read and save devfile content if err := d.SetDevfileContent(); err != nil { return err diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/interface.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/interface.go index 732a15cfa5f..5721ca1da83 100644 --- 
a/vendor/github.com/devfile/library/pkg/devfile/parser/data/interface.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/interface.go @@ -2,56 +2,84 @@ package data import ( v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/api/v2/pkg/attributes" devfilepkg "github.com/devfile/api/v2/pkg/devfile" "github.com/devfile/library/pkg/devfile/parser/data/v2/common" ) +// Generate mock interfaces for DevfileData by executing the following cmd in pkg/devfile/parser/data +// mockgen -package=data -source=interface.go DevfileData > /tmp/mock_interface.go ; cp /tmp/mock_interface.go ./mock_interface.go + // DevfileData is an interface that defines functions for Devfile data operations type DevfileData interface { + + // header related methods + GetSchemaVersion() string SetSchemaVersion(version string) GetMetadata() devfilepkg.DevfileMetadata - SetMetadata(name, version string) + SetMetadata(metadata devfilepkg.DevfileMetadata) + + // top-level attributes related method + + GetAttributes() (attributes.Attributes, error) + AddAttributes(key string, value interface{}) error + UpdateAttributes(key string, value interface{}) error // parent related methods + GetParent() *v1.Parent SetParent(parent *v1.Parent) // event related methods + GetEvents() v1.Events AddEvents(events v1.Events) error UpdateEvents(postStart, postStop, preStart, preStop []string) // component related methods + GetComponents(common.DevfileOptions) ([]v1.Component, error) AddComponents(components []v1.Component) error - UpdateComponent(component v1.Component) + UpdateComponent(component v1.Component) error + DeleteComponent(name string) error // project related methods + GetProjects(common.DevfileOptions) ([]v1.Project, error) AddProjects(projects []v1.Project) error - UpdateProject(project v1.Project) + UpdateProject(project v1.Project) error + DeleteProject(name string) error // starter projects related commands + GetStarterProjects(common.DevfileOptions) 
([]v1.StarterProject, error) AddStarterProjects(projects []v1.StarterProject) error - UpdateStarterProject(project v1.StarterProject) + UpdateStarterProject(project v1.StarterProject) error + DeleteStarterProject(name string) error // command related methods + GetCommands(common.DevfileOptions) ([]v1.Command, error) - AddCommands(commands ...v1.Command) error - UpdateCommand(command v1.Command) + AddCommands(commands []v1.Command) error + UpdateCommand(command v1.Command) error + DeleteCommand(id string) error - // volume related methods - AddVolume(volume v1.Component, path string) error - DeleteVolume(name string) error - GetVolumeMountPath(name string) (string, error) + // volume mount related methods + + AddVolumeMounts(containerName string, volumeMounts []v1.VolumeMount) error + DeleteVolumeMount(name string) error + GetVolumeMountPaths(mountName, containerName string) ([]string, error) // workspace related methods - GetDevfileWorkspace() *v1.DevWorkspaceTemplateSpecContent - SetDevfileWorkspace(content v1.DevWorkspaceTemplateSpecContent) - //utils + GetDevfileWorkspaceSpecContent() *v1.DevWorkspaceTemplateSpecContent + SetDevfileWorkspaceSpecContent(content v1.DevWorkspaceTemplateSpecContent) + GetDevfileWorkspaceSpec() *v1.DevWorkspaceTemplateSpec + SetDevfileWorkspaceSpec(spec v1.DevWorkspaceTemplateSpec) + + // utils + GetDevfileContainerComponents(common.DevfileOptions) ([]v1.Component, error) GetDevfileVolumeComponents(common.DevfileOptions) ([]v1.Component, error) } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/mock_interface.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/mock_interface.go new file mode 100644 index 00000000000..89951d41fdf --- /dev/null +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/mock_interface.go @@ -0,0 +1,552 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package data is a generated GoMock package. 
+package data + +import ( + reflect "reflect" + + v1alpha2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + attributes "github.com/devfile/api/v2/pkg/attributes" + devfile "github.com/devfile/api/v2/pkg/devfile" + common "github.com/devfile/library/pkg/devfile/parser/data/v2/common" + gomock "github.com/golang/mock/gomock" +) + +// MockDevfileData is a mock of DevfileData interface. +type MockDevfileData struct { + ctrl *gomock.Controller + recorder *MockDevfileDataMockRecorder +} + +// MockDevfileDataMockRecorder is the mock recorder for MockDevfileData. +type MockDevfileDataMockRecorder struct { + mock *MockDevfileData +} + +// NewMockDevfileData creates a new mock instance. +func NewMockDevfileData(ctrl *gomock.Controller) *MockDevfileData { + mock := &MockDevfileData{ctrl: ctrl} + mock.recorder = &MockDevfileDataMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDevfileData) EXPECT() *MockDevfileDataMockRecorder { + return m.recorder +} + +// AddAttributes mocks base method. +func (m *MockDevfileData) AddAttributes(key string, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddAttributes", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddAttributes indicates an expected call of AddAttributes. +func (mr *MockDevfileDataMockRecorder) AddAttributes(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAttributes", reflect.TypeOf((*MockDevfileData)(nil).AddAttributes), key, value) +} + +// AddCommands mocks base method. +func (m *MockDevfileData) AddCommands(commands []v1alpha2.Command) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddCommands", commands) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddCommands indicates an expected call of AddCommands. 
+func (mr *MockDevfileDataMockRecorder) AddCommands(commands interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCommands", reflect.TypeOf((*MockDevfileData)(nil).AddCommands), commands) +} + +// AddComponents mocks base method. +func (m *MockDevfileData) AddComponents(components []v1alpha2.Component) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddComponents", components) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddComponents indicates an expected call of AddComponents. +func (mr *MockDevfileDataMockRecorder) AddComponents(components interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddComponents", reflect.TypeOf((*MockDevfileData)(nil).AddComponents), components) +} + +// AddEvents mocks base method. +func (m *MockDevfileData) AddEvents(events v1alpha2.Events) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddEvents", events) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddEvents indicates an expected call of AddEvents. +func (mr *MockDevfileDataMockRecorder) AddEvents(events interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEvents", reflect.TypeOf((*MockDevfileData)(nil).AddEvents), events) +} + +// AddProjects mocks base method. +func (m *MockDevfileData) AddProjects(projects []v1alpha2.Project) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddProjects", projects) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddProjects indicates an expected call of AddProjects. +func (mr *MockDevfileDataMockRecorder) AddProjects(projects interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProjects", reflect.TypeOf((*MockDevfileData)(nil).AddProjects), projects) +} + +// AddStarterProjects mocks base method. 
+func (m *MockDevfileData) AddStarterProjects(projects []v1alpha2.StarterProject) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddStarterProjects", projects) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddStarterProjects indicates an expected call of AddStarterProjects. +func (mr *MockDevfileDataMockRecorder) AddStarterProjects(projects interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStarterProjects", reflect.TypeOf((*MockDevfileData)(nil).AddStarterProjects), projects) +} + +// AddVolumeMounts mocks base method. +func (m *MockDevfileData) AddVolumeMounts(containerName string, volumeMounts []v1alpha2.VolumeMount) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddVolumeMounts", containerName, volumeMounts) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddVolumeMounts indicates an expected call of AddVolumeMounts. +func (mr *MockDevfileDataMockRecorder) AddVolumeMounts(containerName, volumeMounts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddVolumeMounts", reflect.TypeOf((*MockDevfileData)(nil).AddVolumeMounts), containerName, volumeMounts) +} + +// DeleteCommand mocks base method. +func (m *MockDevfileData) DeleteCommand(id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCommand", id) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteCommand indicates an expected call of DeleteCommand. +func (mr *MockDevfileDataMockRecorder) DeleteCommand(id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCommand", reflect.TypeOf((*MockDevfileData)(nil).DeleteCommand), id) +} + +// DeleteComponent mocks base method. 
+func (m *MockDevfileData) DeleteComponent(name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteComponent", name) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteComponent indicates an expected call of DeleteComponent. +func (mr *MockDevfileDataMockRecorder) DeleteComponent(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteComponent", reflect.TypeOf((*MockDevfileData)(nil).DeleteComponent), name) +} + +// DeleteProject mocks base method. +func (m *MockDevfileData) DeleteProject(name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteProject", name) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteProject indicates an expected call of DeleteProject. +func (mr *MockDevfileDataMockRecorder) DeleteProject(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteProject", reflect.TypeOf((*MockDevfileData)(nil).DeleteProject), name) +} + +// DeleteStarterProject mocks base method. +func (m *MockDevfileData) DeleteStarterProject(name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteStarterProject", name) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteStarterProject indicates an expected call of DeleteStarterProject. +func (mr *MockDevfileDataMockRecorder) DeleteStarterProject(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteStarterProject", reflect.TypeOf((*MockDevfileData)(nil).DeleteStarterProject), name) +} + +// DeleteVolumeMount mocks base method. +func (m *MockDevfileData) DeleteVolumeMount(name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteVolumeMount", name) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteVolumeMount indicates an expected call of DeleteVolumeMount. 
+func (mr *MockDevfileDataMockRecorder) DeleteVolumeMount(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolumeMount", reflect.TypeOf((*MockDevfileData)(nil).DeleteVolumeMount), name) +} + +// GetAttributes mocks base method. +func (m *MockDevfileData) GetAttributes() (attributes.Attributes, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAttributes") + ret0, _ := ret[0].(attributes.Attributes) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAttributes indicates an expected call of GetAttributes. +func (mr *MockDevfileDataMockRecorder) GetAttributes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttributes", reflect.TypeOf((*MockDevfileData)(nil).GetAttributes)) +} + +// GetCommands mocks base method. +func (m *MockDevfileData) GetCommands(arg0 common.DevfileOptions) ([]v1alpha2.Command, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCommands", arg0) + ret0, _ := ret[0].([]v1alpha2.Command) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCommands indicates an expected call of GetCommands. +func (mr *MockDevfileDataMockRecorder) GetCommands(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommands", reflect.TypeOf((*MockDevfileData)(nil).GetCommands), arg0) +} + +// GetComponents mocks base method. +func (m *MockDevfileData) GetComponents(arg0 common.DevfileOptions) ([]v1alpha2.Component, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetComponents", arg0) + ret0, _ := ret[0].([]v1alpha2.Component) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetComponents indicates an expected call of GetComponents. 
+func (mr *MockDevfileDataMockRecorder) GetComponents(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetComponents", reflect.TypeOf((*MockDevfileData)(nil).GetComponents), arg0) +} + +// GetDevfileContainerComponents mocks base method. +func (m *MockDevfileData) GetDevfileContainerComponents(arg0 common.DevfileOptions) ([]v1alpha2.Component, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDevfileContainerComponents", arg0) + ret0, _ := ret[0].([]v1alpha2.Component) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDevfileContainerComponents indicates an expected call of GetDevfileContainerComponents. +func (mr *MockDevfileDataMockRecorder) GetDevfileContainerComponents(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileContainerComponents", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileContainerComponents), arg0) +} + +// GetDevfileVolumeComponents mocks base method. +func (m *MockDevfileData) GetDevfileVolumeComponents(arg0 common.DevfileOptions) ([]v1alpha2.Component, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDevfileVolumeComponents", arg0) + ret0, _ := ret[0].([]v1alpha2.Component) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDevfileVolumeComponents indicates an expected call of GetDevfileVolumeComponents. +func (mr *MockDevfileDataMockRecorder) GetDevfileVolumeComponents(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileVolumeComponents", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileVolumeComponents), arg0) +} + +// GetDevfileWorkspaceSpec mocks base method. 
+func (m *MockDevfileData) GetDevfileWorkspaceSpec() *v1alpha2.DevWorkspaceTemplateSpec { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDevfileWorkspaceSpec") + ret0, _ := ret[0].(*v1alpha2.DevWorkspaceTemplateSpec) + return ret0 +} + +// GetDevfileWorkspaceSpec indicates an expected call of GetDevfileWorkspaceSpec. +func (mr *MockDevfileDataMockRecorder) GetDevfileWorkspaceSpec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileWorkspaceSpec", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileWorkspaceSpec)) +} + +// GetDevfileWorkspaceSpecContent mocks base method. +func (m *MockDevfileData) GetDevfileWorkspaceSpecContent() *v1alpha2.DevWorkspaceTemplateSpecContent { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDevfileWorkspaceSpecContent") + ret0, _ := ret[0].(*v1alpha2.DevWorkspaceTemplateSpecContent) + return ret0 +} + +// GetDevfileWorkspaceSpecContent indicates an expected call of GetDevfileWorkspaceSpecContent. +func (mr *MockDevfileDataMockRecorder) GetDevfileWorkspaceSpecContent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileWorkspaceSpecContent", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileWorkspaceSpecContent)) +} + +// GetEvents mocks base method. +func (m *MockDevfileData) GetEvents() v1alpha2.Events { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEvents") + ret0, _ := ret[0].(v1alpha2.Events) + return ret0 +} + +// GetEvents indicates an expected call of GetEvents. +func (mr *MockDevfileDataMockRecorder) GetEvents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEvents", reflect.TypeOf((*MockDevfileData)(nil).GetEvents)) +} + +// GetMetadata mocks base method. 
+func (m *MockDevfileData) GetMetadata() devfile.DevfileMetadata { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetadata") + ret0, _ := ret[0].(devfile.DevfileMetadata) + return ret0 +} + +// GetMetadata indicates an expected call of GetMetadata. +func (mr *MockDevfileDataMockRecorder) GetMetadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockDevfileData)(nil).GetMetadata)) +} + +// GetParent mocks base method. +func (m *MockDevfileData) GetParent() *v1alpha2.Parent { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetParent") + ret0, _ := ret[0].(*v1alpha2.Parent) + return ret0 +} + +// GetParent indicates an expected call of GetParent. +func (mr *MockDevfileDataMockRecorder) GetParent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParent", reflect.TypeOf((*MockDevfileData)(nil).GetParent)) +} + +// GetProjects mocks base method. +func (m *MockDevfileData) GetProjects(arg0 common.DevfileOptions) ([]v1alpha2.Project, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProjects", arg0) + ret0, _ := ret[0].([]v1alpha2.Project) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProjects indicates an expected call of GetProjects. +func (mr *MockDevfileDataMockRecorder) GetProjects(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProjects", reflect.TypeOf((*MockDevfileData)(nil).GetProjects), arg0) +} + +// GetSchemaVersion mocks base method. +func (m *MockDevfileData) GetSchemaVersion() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSchemaVersion") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetSchemaVersion indicates an expected call of GetSchemaVersion. 
+func (mr *MockDevfileDataMockRecorder) GetSchemaVersion() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchemaVersion", reflect.TypeOf((*MockDevfileData)(nil).GetSchemaVersion)) +} + +// GetStarterProjects mocks base method. +func (m *MockDevfileData) GetStarterProjects(arg0 common.DevfileOptions) ([]v1alpha2.StarterProject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStarterProjects", arg0) + ret0, _ := ret[0].([]v1alpha2.StarterProject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStarterProjects indicates an expected call of GetStarterProjects. +func (mr *MockDevfileDataMockRecorder) GetStarterProjects(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStarterProjects", reflect.TypeOf((*MockDevfileData)(nil).GetStarterProjects), arg0) +} + +// GetVolumeMountPaths mocks base method. +func (m *MockDevfileData) GetVolumeMountPaths(mountName, containerName string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVolumeMountPaths", mountName, containerName) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVolumeMountPaths indicates an expected call of GetVolumeMountPaths. +func (mr *MockDevfileDataMockRecorder) GetVolumeMountPaths(mountName, containerName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumeMountPaths", reflect.TypeOf((*MockDevfileData)(nil).GetVolumeMountPaths), mountName, containerName) +} + +// SetDevfileWorkspaceSpec mocks base method. +func (m *MockDevfileData) SetDevfileWorkspaceSpec(spec v1alpha2.DevWorkspaceTemplateSpec) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDevfileWorkspaceSpec", spec) +} + +// SetDevfileWorkspaceSpec indicates an expected call of SetDevfileWorkspaceSpec. 
+func (mr *MockDevfileDataMockRecorder) SetDevfileWorkspaceSpec(spec interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDevfileWorkspaceSpec", reflect.TypeOf((*MockDevfileData)(nil).SetDevfileWorkspaceSpec), spec) +} + +// SetDevfileWorkspaceSpecContent mocks base method. +func (m *MockDevfileData) SetDevfileWorkspaceSpecContent(content v1alpha2.DevWorkspaceTemplateSpecContent) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetDevfileWorkspaceSpecContent", content) +} + +// SetDevfileWorkspaceSpecContent indicates an expected call of SetDevfileWorkspaceSpecContent. +func (mr *MockDevfileDataMockRecorder) SetDevfileWorkspaceSpecContent(content interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDevfileWorkspaceSpecContent", reflect.TypeOf((*MockDevfileData)(nil).SetDevfileWorkspaceSpecContent), content) +} + +// SetMetadata mocks base method. +func (m *MockDevfileData) SetMetadata(metadata devfile.DevfileMetadata) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetMetadata", metadata) +} + +// SetMetadata indicates an expected call of SetMetadata. +func (mr *MockDevfileDataMockRecorder) SetMetadata(metadata interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMetadata", reflect.TypeOf((*MockDevfileData)(nil).SetMetadata), metadata) +} + +// SetParent mocks base method. +func (m *MockDevfileData) SetParent(parent *v1alpha2.Parent) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetParent", parent) +} + +// SetParent indicates an expected call of SetParent. +func (mr *MockDevfileDataMockRecorder) SetParent(parent interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetParent", reflect.TypeOf((*MockDevfileData)(nil).SetParent), parent) +} + +// SetSchemaVersion mocks base method. 
+func (m *MockDevfileData) SetSchemaVersion(version string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSchemaVersion", version) +} + +// SetSchemaVersion indicates an expected call of SetSchemaVersion. +func (mr *MockDevfileDataMockRecorder) SetSchemaVersion(version interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSchemaVersion", reflect.TypeOf((*MockDevfileData)(nil).SetSchemaVersion), version) +} + +// UpdateAttributes mocks base method. +func (m *MockDevfileData) UpdateAttributes(key string, value interface{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAttributes", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateAttributes indicates an expected call of UpdateAttributes. +func (mr *MockDevfileDataMockRecorder) UpdateAttributes(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAttributes", reflect.TypeOf((*MockDevfileData)(nil).UpdateAttributes), key, value) +} + +// UpdateCommand mocks base method. +func (m *MockDevfileData) UpdateCommand(command v1alpha2.Command) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCommand", command) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateCommand indicates an expected call of UpdateCommand. +func (mr *MockDevfileDataMockRecorder) UpdateCommand(command interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCommand", reflect.TypeOf((*MockDevfileData)(nil).UpdateCommand), command) +} + +// UpdateComponent mocks base method. +func (m *MockDevfileData) UpdateComponent(component v1alpha2.Component) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateComponent", component) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateComponent indicates an expected call of UpdateComponent. 
+func (mr *MockDevfileDataMockRecorder) UpdateComponent(component interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateComponent", reflect.TypeOf((*MockDevfileData)(nil).UpdateComponent), component) +} + +// UpdateEvents mocks base method. +func (m *MockDevfileData) UpdateEvents(postStart, postStop, preStart, preStop []string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateEvents", postStart, postStop, preStart, preStop) +} + +// UpdateEvents indicates an expected call of UpdateEvents. +func (mr *MockDevfileDataMockRecorder) UpdateEvents(postStart, postStop, preStart, preStop interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEvents", reflect.TypeOf((*MockDevfileData)(nil).UpdateEvents), postStart, postStop, preStart, preStop) +} + +// UpdateProject mocks base method. +func (m *MockDevfileData) UpdateProject(project v1alpha2.Project) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateProject", project) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateProject indicates an expected call of UpdateProject. +func (mr *MockDevfileDataMockRecorder) UpdateProject(project interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProject", reflect.TypeOf((*MockDevfileData)(nil).UpdateProject), project) +} + +// UpdateStarterProject mocks base method. +func (m *MockDevfileData) UpdateStarterProject(project v1alpha2.StarterProject) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateStarterProject", project) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateStarterProject indicates an expected call of UpdateStarterProject. 
+func (mr *MockDevfileDataMockRecorder) UpdateStarterProject(project interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStarterProject", reflect.TypeOf((*MockDevfileData)(nil).UpdateStarterProject), project) +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go index 5bf3fac4829..7145fd0bed1 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go @@ -1,16 +1,21 @@ package version210 -// https://raw.githubusercontent.com/devfile/api/master/schemas/latest/devfile.json +// https://raw.githubusercontent.com/devfile/api/2.1.x/schemas/latest/devfile.json const JsonSchema210 = `{ - "description": "Devfile describes the structure of a cloud-native workspace and development environment.", + "description": "Devfile describes the structure of a cloud-native devworkspace and development environment.", "type": "object", - "title": "Devfile schema - Version 2.1.0-alpha", + "title": "Devfile schema - Version 2.1.0", "required": [ "schemaVersion" ], "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, "commands": { - "description": "Predefined, ready-to-use, workspace-related commands", + "description": "Predefined, ready-to-use, devworkspace-related commands", "type": "array", "items": { "type": "object", @@ -28,16 +33,6 @@ const JsonSchema210 = `{ "apply" ] }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, { "required": [ "composite" @@ -46,7 +41,7 @@ const JsonSchema210 = `{ ], "properties": { "apply": { - "description": "Command that consists in applying a given 
component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", + "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.", "type": "object", "required": [ "component" @@ -218,115 +213,13 @@ const JsonSchema210 = `{ "type": "string", "maxLength": 63, "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode 
configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false } }, "additionalProperties": false } }, "components": { - "description": "List of the workspace components, such as editor and plugins, user-provided containers, or other types of components", + "description": "List of the devworkspace components, such as editor and plugins, user-provided containers, or other types of components", "type": "array", "items": { "type": "object", @@ -353,11 +246,6 @@ const JsonSchema210 = `{ "required": [ "volume" ] - }, - { - "required": [ - "plugin" - ] } ], "properties": { @@ -367,7 +255,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "container": { - "description": "Allows adding and configuring workspace-related containers", + "description": "Allows adding and configuring devworkspace-related containers", "type": "object", "required": [ "image" @@ -412,7 +300,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "exposure": { - "description": "Describes how the endpoint should 
be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", "type": "string", "default": "public", "enum": [ @@ -520,7 +408,7 @@ const JsonSchema210 = `{ "additionalProperties": false }, "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", + "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", "type": "object", "oneOf": [ { @@ -550,7 +438,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", "type": "string", "default": "public", "enum": [ @@ -610,7 +498,7 @@ const JsonSchema210 = `{ "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" }, "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", + "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", "type": "object", "oneOf": [ { @@ -640,7 +528,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", "type": "string", "default": "public", "enum": [ @@ -693,867 +581,170 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "plugin": { - "description": "Allows importing a plugin.\n\nPlugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. 
They are defined in either YAML files following the devfile syntax, or as 'DevWorkspaceTemplate' Kubernetes Custom Resources", + "volume": { + "description": "Allows specifying the definition of a volume shared by several other components", + "type": "object", + "properties": { + "ephemeral": { + "description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false", + "type": "boolean" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "events": { + "description": "Bindings of commands to events. Each command is referred-to by its name.", + "type": "object", + "properties": { + "postStart": { + "description": "IDs of commands that should be executed after the devworkspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", + "type": "array", + "items": { + "type": "string" + } + }, + "postStop": { + "description": "IDs of commands that should be executed after stopping the devworkspace.", + "type": "array", + "items": { + "type": "string" + } + }, + "preStart": { + "description": "IDs of commands that should be executed before the devworkspace start. Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD.", + "type": "array", + "items": { + "type": "string" + } + }, + "preStop": { + "description": "IDs of commands that should be executed before stopping the devworkspace.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "metadata": { + "description": "Optional metadata", + "type": "object", + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes. 
Deprecated, use the top-level attributes field instead.", + "type": "object", + "additionalProperties": true + }, + "description": { + "description": "Optional devfile description", + "type": "string" + }, + "displayName": { + "description": "Optional devfile display name", + "type": "string" + }, + "globalMemoryLimit": { + "description": "Optional devfile global memory limit", + "type": "string" + }, + "icon": { + "description": "Optional devfile icon, can be a URI or a relative path in the project", + "type": "string" + }, + "language": { + "description": "Optional devfile language", + "type": "string" + }, + "name": { + "description": "Optional devfile name", + "type": "string" + }, + "projectType": { + "description": "Optional devfile project type", + "type": "string" + }, + "tags": { + "description": "Optional devfile tags", + "type": "array", + "items": { + "type": "string" + } + }, + "version": { + "description": "Optional semver-compatible version", + "type": "string", + "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" + }, + "website": { + "description": "Optional devfile website", + "type": "string" + } + }, + "additionalProperties": true + }, + "parent": { + "description": "Parent devworkspace template", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "id" + ] + }, + { + "required": [ + "kubernetes" + ] + } + ], + "properties": { + "attributes": { + "description": "Overrides of attributes encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", + "type": "object", + "additionalProperties": true + }, + "commands": { + "description": "Overrides of commands encapsulated in a parent devfile or a plugin. 
Overriding is done according to K8S strategic merge patch standard rules.", + "type": "array", + "items": { "type": "object", + "required": [ + "id" + ], "oneOf": [ { "required": [ - "uri" + "exec" ] }, { "required": [ - "id" + "apply" ] }, { "required": [ - "kubernetes" - ] - } - ], - "properties": { - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus 
for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined 
content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring workspace-related containers", - "type": "object", - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "cpuLimit": { - "type": "string" - }, - "cpuRequest": { - "type": "string" - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - 
}, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "memoryRequest": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, 
typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "registryUrl": { - "type": "string" - }, - "uri": { - "description": "Uri of a Devfile yaml file", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - 
"properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "events": { - "description": "Bindings of commands to events. Each command is referred-to by its name.", - "type": "object", - "properties": { - "postStart": { - "description": "IDs of commands that should be executed after the workspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", - "type": "array", - "items": { - "type": "string" - } - }, - "postStop": { - "description": "IDs of commands that should be executed after stopping the workspace.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStart": { - "description": "IDs of commands that should be executed before the workspace start. Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStop": { - "description": "IDs of commands that should be executed before stopping the workspace.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "metadata": { - "description": "Optional metadata", - "type": "object", - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Optional devfile description", - "type": "string" - }, - "displayName": { - "description": "Optional devfile display name", - "type": "string" - }, - "globalMemoryLimit": { - "description": "Optional devfile global memory limit", - "type": "string" - }, - "icon": { - "description": "Optional devfile icon", - "type": "string" - }, - "name": { - 
"description": "Optional devfile name", - "type": "string" - }, - "tags": { - "description": "Optional devfile tags", - "type": "array", - "items": { - "type": "string" - } - }, - "version": { - "description": "Optional semver-compatible version", - "type": "string", - "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - } - }, - "additionalProperties": true - }, - "parent": { - "description": "Parent workspace template", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" - ] - } - ], - "properties": { - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" + "composite" ] } ], "properties": { "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", + "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, 
unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.", "type": "object", "properties": { "component": { @@ -1708,102 +899,6 @@ const JsonSchema210 = `{ "type": "string", "maxLength": 63, "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": 
"Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false } }, "additionalProperties": false @@ -1837,11 +932,6 @@ const JsonSchema210 = `{ "required": [ "volume" ] - }, - { - "required": [ - "plugin" - ] } ], "properties": { @@ -1851,7 +941,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "container": { - "description": "Allows adding and configuring workspace-related containers", + "description": "Allows adding and configuring devworkspace-related containers", "type": "object", "properties": { "args": { @@ -1892,7 +982,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", "type": "string", "enum": [ 
"public", @@ -1995,95 +1085,8 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", + "kubernetes": { + "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", "type": "object", "oneOf": [ { @@ -2112,7 +1115,7 @@ const JsonSchema210 = `{ "additionalProperties": true }, "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", "type": "string", "enum": [ "public", @@ -2163,8 +1166,14 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "plugin": { - "description": "Allows importing a plugin.\n\nPlugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. 
They are defined in either YAML files following the devfile syntax, or as 'DevWorkspaceTemplate' Kubernetes Custom Resources", + "name": { + "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "openshift": { + "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", "type": "object", "oneOf": [ { @@ -2174,695 +1183,71 @@ const JsonSchema210 = `{ }, { "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" + "inlined" ] } ], "properties": { - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", + "endpoints": { "type": "array", "items": { "type": "object", "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" - ] - } + "name" ], "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be 
applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", "type": "object", "additionalProperties": true }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed 
concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. 
If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - 
"description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. 
Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" + "enum": [ + "public", + "internal", + "none" ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring workspace-related containers", - "type": "object", - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "cpuLimit": { - "type": "string" - }, - "cpuRequest": { - "type": "string" - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - 
"additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "memoryRequest": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, 
typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false }, "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", "type": "string", "maxLength": 63, "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false + "path": { + "description": "Path of the endpoint URL", + "type": "string" }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" } }, "additionalProperties": false } }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "registryUrl": { + "inlined": { + "description": "Inlined manifest", "type": "string" }, "uri": { - "description": "Uri of a Devfile yaml file", + "description": "Location in a file fetched from a uri.", "type": "string" } }, @@ -2872,6 +1257,10 @@ const JsonSchema210 = `{ "description": "Allows specifying the definition of a volume shared by several other components", "type": "object", "properties": { + "ephemeral": { + "description": "Ephemeral volumes are not stored persistently across restarts. 
Defaults to false", + "type": "boolean" + }, "size": { "description": "Size of the volume", "type": "string" @@ -2917,11 +1306,6 @@ const JsonSchema210 = `{ "git" ] }, - { - "required": [ - "github" - ] - }, { "required": [ "zip" @@ -2967,48 +1351,12 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, "name": { "description": "Project name", "type": "string", "maxLength": 63, "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" }, - "sparseCheckoutDirs": { - "description": "Populate the project sparsely with selected directories.", - "type": "array", - "items": { - "type": "string" - } - }, "zip": { "description": "Project's Zip source", "type": "object", @@ -3025,6 +1373,7 @@ const JsonSchema210 = `{ } }, "registryUrl": { + "description": "Registry URL to pull the parent devfile from when using id in the parent reference. 
To ensure the parent devfile gets resolved consistently in different environments, it is recommended to always specify the 'regsitryURL' when 'Id' is used.", "type": "string" }, "starterProjects": { @@ -3041,11 +1390,6 @@ const JsonSchema210 = `{ "git" ] }, - { - "required": [ - "github" - ] - }, { "required": [ "zip" @@ -3091,35 +1435,6 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, "name": { "description": "Project name", "type": "string", @@ -3146,14 +1461,21 @@ const JsonSchema210 = `{ } }, "uri": { - "description": "Uri of a Devfile yaml file", + "description": "URI Reference of a parent devfile YAML file. It can be a full URL or a relative URI with the current devfile as the base URI.", "type": "string" + }, + "variables": { + "description": "Overrides of variables encapsulated in a parent devfile. 
Overriding is done according to K8S strategic merge patch standard rules.", + "type": "object", + "additionalProperties": { + "type": "string" + } } }, "additionalProperties": false }, "projects": { - "description": "Projects worked on in the workspace, containing names and sources locations", + "description": "Projects worked on in the devworkspace, containing names and sources locations", "type": "array", "items": { "type": "object", @@ -3166,11 +1488,6 @@ const JsonSchema210 = `{ "git" ] }, - { - "required": [ - "github" - ] - }, { "required": [ "zip" @@ -3219,51 +1536,12 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. 
Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, "name": { "description": "Project name", "type": "string", "maxLength": 63, "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" }, - "sparseCheckoutDirs": { - "description": "Populate the project sparsely with selected directories.", - "type": "array", - "items": { - "type": "string" - } - }, "zip": { "description": "Project's Zip source", "type": "object", @@ -3298,11 +1576,6 @@ const JsonSchema210 = `{ "git" ] }, - { - "required": [ - "github" - ] - }, { "required": [ "zip" @@ -3351,38 +1624,6 @@ const JsonSchema210 = `{ }, "additionalProperties": false }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, "name": { "description": "Project name", "type": "string", @@ -3407,6 +1648,13 @@ const JsonSchema210 = `{ }, "additionalProperties": false } + }, + "variables": { + "description": "Map of key-value variables used for string replacement in the devfile. 
Values can can be referenced via {{variable-key}} to replace the corresponding value in string fields in the devfile. Replacement cannot be used for\n\n - schemaVersion, metadata, parent source - element identifiers, e.g. command id, component name, endpoint name, project name - references to identifiers, e.g. in events, a command's component, container's volume mount name - string enums, e.g. command group kind, endpoint exposure", + "type": "object", + "additionalProperties": { + "type": "string" + } } }, "additionalProperties": false diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go new file mode 100644 index 00000000000..15c05b6356d --- /dev/null +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go @@ -0,0 +1,1928 @@ +package version220 + +// https://raw.githubusercontent.com/devfile/api/main/schemas/latest/devfile.json +const JsonSchema220 = `{ + "description": "Devfile describes the structure of a cloud-native devworkspace and development environment.", + "type": "object", + "title": "Devfile schema - Version 2.2.0-alpha", + "required": [ + "schemaVersion" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "commands": { + "description": "Predefined, ready-to-use, devworkspace-related commands", + "type": "array", + "items": { + "type": "object", + "required": [ + "id" + ], + "oneOf": [ + { + "required": [ + "exec" + ] + }, + { + "required": [ + "apply" + ] + }, + { + "required": [ + "composite" + ] + } + ], + "properties": { + "apply": { + "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 
'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.", + "type": "object", + "required": [ + "component" + ], + "properties": { + "component": { + "description": "Describes component that will be applied", + "type": "string" + }, + "group": { + "description": "Defines the group this command is part of", + "type": "object", + "required": [ + "kind" + ], + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "type": "string", + "enum": [ + "build", + "run", + "test", + "debug", + "deploy" + ] + } + }, + "additionalProperties": false + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + } + }, + "additionalProperties": false + }, + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "composite": { + "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", + "type": "object", + "properties": { + "commands": { + "description": "The commands that comprise this composite command", + "type": "array", + "items": { + "type": "string" + } + }, + "group": { + "description": "Defines the group this command is part of", + "type": "object", + "required": [ + "kind" + ], + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "type": "string", + "enum": [ + "build", + "run", + "test", + 
"debug", + "deploy" + ] + } + }, + "additionalProperties": false + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "parallel": { + "description": "Indicates if the sub-commands should be executed concurrently", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "exec": { + "description": "CLI Command executed in an existing component container", + "type": "object", + "required": [ + "commandLine", + "component" + ], + "properties": { + "commandLine": { + "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", + "type": "string" + }, + "component": { + "description": "Describes component to which given action relates", + "type": "string" + }, + "env": { + "description": "Optional list of environment variables that have to be set before running the command", + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "group": { + "description": "Defines the group this command is part of", + "type": "object", + "required": [ + "kind" + ], + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "type": "string", + "enum": [ + "build", + "run", + "test", + "debug", + "deploy" + ] + } + }, + "additionalProperties": false + }, + "hotReloadCapable": { + "description": "Whether the command is capable to reload 
itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", + "type": "boolean" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "workingDir": { + "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", + "type": "string" + } + }, + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + } + }, + "additionalProperties": false + } + }, + "components": { + "description": "List of the devworkspace components, such as editor and plugins, user-provided containers, or other types of components", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "oneOf": [ + { + "required": [ + "container" + ] + }, + { + "required": [ + "kubernetes" + ] + }, + { + "required": [ + "openshift" + ] + }, + { + "required": [ + "volume" + ] + }, + { + "required": [ + "image" + ] + } + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "container": { + "description": "Allows adding and configuring devworkspace-related containers", + "type": "object", + "required": [ + "image" + ], + "properties": { + "args": { + "description": "The arguments to supply to the command running the 
dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", + "type": "array", + "items": { + "type": "string" + } + }, + "cpuLimit": { + "type": "string" + }, + "cpuRequest": { + "type": "string" + }, + "dedicatedPod": { + "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", + "type": "boolean" + }, + "endpoints": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "targetPort" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", + "type": "object", + "additionalProperties": true + }, + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", + "type": "string", + "default": "public", + "enum": [ + "public", + "internal", + "none" + ] + }, + "name": { + "type": "string", 
+ "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "Path of the endpoint URL", + "type": "string" + }, + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "default": "http", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" + } + }, + "additionalProperties": false + } + }, + "env": { + "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "image": { + "type": "string" + }, + "memoryLimit": { + "type": "string" + }, + "memoryRequest": { + "type": "string" + }, + "mountSources": { + "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", + "type": "boolean" + }, + "sourceMapping": { + "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", + "type": "string", + "default": "/projects" + }, + "volumeMounts": { + "description": "List of volumes mounts that should be mounted is this container.", + "type": "array", + "items": { + "description": "Volume that should be mounted to a component container", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is '/\u003cname\u003e'.", + "type": "string" + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "image": { + "description": "Allows specifying the definition of an image for outer loop builds", + "type": "object", + "required": [ + "imageName" + ], + "oneOf": [ + { + "required": [ + "dockerfile" + ] + } + ], + "properties": { + "dockerfile": { + "description": "Allows specifying dockerfile type build", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "devfileRegistry" + ] + }, + { + "required": [ + "git" + ] + } + ], + "properties": { + "args": { + "description": "The arguments to supply to the dockerfile build.", + "type": "array", + "items": { + "type": "string" + } + }, + "buildContext": { + "description": "Path of source directory to establish build context. Defaults to ${PROJECT_ROOT} in the container", + "type": "string" + }, + "devfileRegistry": { + "description": "Dockerfile's Devfile Registry source", + "type": "object", + "required": [ + "id" + ], + "properties": { + "id": { + "description": "Id in a devfile registry that contains a Dockerfile. The src in the OCI registry required for the Dockerfile build will be downloaded for building the image.", + "type": "string" + }, + "registryUrl": { + "description": "Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. To ensure the Dockerfile gets resolved consistently in different environments, it is recommended to always specify the 'devfileRegistryUrl' when 'Id' is used.", + "type": "string" + } + }, + "additionalProperties": false + }, + "git": { + "description": "Dockerfile's Git source", + "type": "object", + "required": [ + "remotes" + ], + "properties": { + "checkoutFrom": { + "description": "Defines from what the project should be checked out. 
Required if there are more than one remote configured", + "type": "object", + "properties": { + "remote": { + "description": "The remote name should be used as init. Required if there are more than one remote configured", + "type": "string" + }, + "revision": { + "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", + "type": "string" + } + }, + "additionalProperties": false + }, + "fileLocation": { + "description": "Location of the Dockerfile in the Git repository when using git as Dockerfile src. Defaults to Dockerfile.", + "type": "string" + }, + "remotes": { + "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "rootRequired": { + "description": "Specify if a privileged builder pod is required.\n\nDefault value is 'false'", + "type": "boolean" + }, + "uri": { + "description": "URI Reference of a Dockerfile. It can be a full URL or a relative URI from the current devfile as the base URI.", + "type": "string" + } + }, + "additionalProperties": false + }, + "imageName": { + "description": "Name of the image for the resulting outerloop build", + "type": "string" + } + }, + "additionalProperties": false + }, + "kubernetes": { + "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ], + "properties": { + "endpoints": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "targetPort" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", + "type": "object", + "additionalProperties": true + }, + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", + "type": "string", + "default": "public", + "enum": [ + "public", + "internal", + "none" + ] + }, + "name": { + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "Path of the endpoint URL", + "type": "string" + }, + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "default": "http", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" + } + }, + "additionalProperties": false + } + }, + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "additionalProperties": false + }, + "name": { + "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "openshift": { + "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ], + "properties": { + "endpoints": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name", + "targetPort" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", + "type": "object", + "additionalProperties": true + }, + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", + "type": "string", + "default": "public", + "enum": [ + "public", + "internal", + "none" + ] + }, + "name": { + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "Path of the endpoint URL", + "type": "string" + }, + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "default": "http", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" + } + }, + "additionalProperties": false + } + }, + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "additionalProperties": false + }, + "volume": { + "description": "Allows specifying the definition of a volume shared by several other components", + "type": "object", + "properties": { + "ephemeral": { + "description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false", + "type": "boolean" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "events": { + "description": "Bindings of commands to events. Each command is referred-to by its name.", + "type": "object", + "properties": { + "postStart": { + "description": "IDs of commands that should be executed after the devworkspace is completely started. 
In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", + "type": "array", + "items": { + "type": "string" + } + }, + "postStop": { + "description": "IDs of commands that should be executed after stopping the devworkspace.", + "type": "array", + "items": { + "type": "string" + } + }, + "preStart": { + "description": "IDs of commands that should be executed before the devworkspace start. Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD.", + "type": "array", + "items": { + "type": "string" + } + }, + "preStop": { + "description": "IDs of commands that should be executed before stopping the devworkspace.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "metadata": { + "description": "Optional metadata", + "type": "object", + "properties": { + "architectures": { + "description": "Optional list of processor architectures that the devfile supports, empty list suggests that the devfile can be used on any architecture", + "type": "array", + "uniqueItems": true, + "items": { + "description": "Architecture describes the architecture type", + "type": "string", + "enum": [ + "amd64", + "arm64", + "ppc64le", + "s390x" + ] + } + }, + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes. 
Deprecated, use the top-level attributes field instead.", + "type": "object", + "additionalProperties": true + }, + "description": { + "description": "Optional devfile description", + "type": "string" + }, + "displayName": { + "description": "Optional devfile display name", + "type": "string" + }, + "globalMemoryLimit": { + "description": "Optional devfile global memory limit", + "type": "string" + }, + "icon": { + "description": "Optional devfile icon, can be a URI or a relative path in the project", + "type": "string" + }, + "language": { + "description": "Optional devfile language", + "type": "string" + }, + "name": { + "description": "Optional devfile name", + "type": "string" + }, + "projectType": { + "description": "Optional devfile project type", + "type": "string" + }, + "provider": { + "description": "Optional devfile provider information", + "type": "string" + }, + "supportUrl": { + "description": "Optional link to a page that provides support information", + "type": "string" + }, + "tags": { + "description": "Optional devfile tags", + "type": "array", + "items": { + "type": "string" + } + }, + "version": { + "description": "Optional semver-compatible version", + "type": "string", + "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" + }, + "website": { + "description": "Optional devfile website", + "type": "string" + } + }, + "additionalProperties": true + }, + "parent": { + "description": "Parent devworkspace template", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "id" + ] + }, + { + "required": [ + "kubernetes" + ] + } + ], + "properties": { + "attributes": { + "description": "Overrides of attributes encapsulated in a parent devfile. 
Overriding is done according to K8S strategic merge patch standard rules.", + "type": "object", + "additionalProperties": true + }, + "commands": { + "description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", + "type": "array", + "items": { + "type": "object", + "required": [ + "id" + ], + "oneOf": [ + { + "required": [ + "exec" + ] + }, + { + "required": [ + "apply" + ] + }, + { + "required": [ + "composite" + ] + } + ], + "properties": { + "apply": { + "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.", + "type": "object", + "properties": { + "component": { + "description": "Describes component that will be applied", + "type": "string" + }, + "group": { + "description": "Defines the group this command is part of", + "type": "object", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "type": "string", + "enum": [ + "build", + "run", + "test", + "debug", + "deploy" + ] + } + }, + "additionalProperties": false + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + } + }, + "additionalProperties": false + }, + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + 
"composite": { + "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", + "type": "object", + "properties": { + "commands": { + "description": "The commands that comprise this composite command", + "type": "array", + "items": { + "type": "string" + } + }, + "group": { + "description": "Defines the group this command is part of", + "type": "object", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "type": "string", + "enum": [ + "build", + "run", + "test", + "debug", + "deploy" + ] + } + }, + "additionalProperties": false + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "parallel": { + "description": "Indicates if the sub-commands should be executed concurrently", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "exec": { + "description": "CLI Command executed in an existing component container", + "type": "object", + "properties": { + "commandLine": { + "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", + "type": "string" + }, + "component": { + "description": "Describes component to which given action relates", + "type": "string" + }, + "env": { + "description": "Optional list of environment variables that have to be set before running the command", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "group": { + "description": "Defines the group this command is part of", + "type": "object", + "properties": { + "isDefault": { + "description": "Identifies the default command for a given group kind", + "type": "boolean" + }, + "kind": { + "description": "Kind of group the command is part of", + "type": "string", + "enum": [ + "build", + "run", + "test", + "debug", + "deploy" + ] + } + }, + "additionalProperties": false + }, + "hotReloadCapable": { + "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", + "type": "boolean" + }, + "label": { + "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", + "type": "string" + }, + "workingDir": { + "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", + "type": "string" + } + }, + "additionalProperties": false + }, + "id": { + "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + } + }, + "additionalProperties": false + } + }, + "components": { + "description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "oneOf": [ + { + "required": [ + "container" + ] + }, + { + "required": [ + "kubernetes" + ] + }, + { + "required": [ + "openshift" + ] + }, + { + "required": [ + "volume" + ] + }, + { + "required": [ + "image" + ] + } + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "container": { + "description": "Allows adding and configuring devworkspace-related containers", + "type": "object", + "properties": { + "args": { + "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", + "type": "array", + "items": { + "type": "string" + } + }, + "cpuLimit": { + "type": "string" + }, + "cpuRequest": { + "type": "string" + }, + "dedicatedPod": { + "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", + "type": "boolean" + }, + "endpoints": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", + "type": "object", + "additionalProperties": true + }, + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", + "type": "string", + "enum": [ + "public", + "internal", + "none" + ] + }, + "name": { + "type": "string", + "maxLength": 63, + "pattern": 
"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "Path of the endpoint URL", + "type": "string" + }, + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" + } + }, + "additionalProperties": false + } + }, + "env": { + "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + } + }, + "image": { + "type": "string" + }, + "memoryLimit": { + "type": "string" + }, + "memoryRequest": { + "type": "string" + }, + "mountSources": { + "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", + "type": "boolean" + }, + "sourceMapping": { + "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", + "type": "string" + }, + "volumeMounts": { + "description": "List of volumes mounts that should be mounted is this container.", + "type": "array", + "items": { + "description": "Volume that should be mounted to a component container", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is '/\u003cname\u003e'.", + "type": "string" + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "image": { + "description": "Allows specifying the definition of an image for outer loop builds", + "type": "object", + "oneOf": [ + { + "required": [ + "dockerfile" + ] + } + ], + "properties": { + "dockerfile": { + "description": "Allows specifying dockerfile type build", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "devfileRegistry" + ] + }, + { + "required": [ + "git" + ] + } + ], + "properties": { + "args": { + "description": "The arguments to supply to the dockerfile build.", + "type": "array", + "items": { + "type": "string" + } + }, + "buildContext": { + "description": "Path of source directory to establish build context. Defaults to ${PROJECT_ROOT} in the container", + "type": "string" + }, + "devfileRegistry": { + "description": "Dockerfile's Devfile Registry source", + "type": "object", + "properties": { + "id": { + "description": "Id in a devfile registry that contains a Dockerfile. The src in the OCI registry required for the Dockerfile build will be downloaded for building the image.", + "type": "string" + }, + "registryUrl": { + "description": "Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. To ensure the Dockerfile gets resolved consistently in different environments, it is recommended to always specify the 'devfileRegistryUrl' when 'Id' is used.", + "type": "string" + } + }, + "additionalProperties": false + }, + "git": { + "description": "Dockerfile's Git source", + "type": "object", + "properties": { + "checkoutFrom": { + "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", + "type": "object", + "properties": { + "remote": { + "description": "The remote name should be used as init. 
Required if there are more than one remote configured", + "type": "string" + }, + "revision": { + "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", + "type": "string" + } + }, + "additionalProperties": false + }, + "fileLocation": { + "description": "Location of the Dockerfile in the Git repository when using git as Dockerfile src. Defaults to Dockerfile.", + "type": "string" + }, + "remotes": { + "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "rootRequired": { + "description": "Specify if a privileged builder pod is required.\n\nDefault value is 'false'", + "type": "boolean" + }, + "uri": { + "description": "URI Reference of a Dockerfile. It can be a full URL or a relative URI from the current devfile as the base URI.", + "type": "string" + } + }, + "additionalProperties": false + }, + "imageName": { + "description": "Name of the image for the resulting outerloop build", + "type": "string" + } + }, + "additionalProperties": false + }, + "kubernetes": { + "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ], + "properties": { + "endpoints": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", + "type": "object", + "additionalProperties": true + }, + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", + "type": "string", + "enum": [ + "public", + "internal", + "none" + ] + }, + "name": { + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "Path of the endpoint URL", + "type": "string" + }, + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" + } + }, + "additionalProperties": false + } + }, + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "additionalProperties": false + }, + "name": { + "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "openshift": { + "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", + "type": "object", + "oneOf": [ + { + "required": [ + "uri" + ] + }, + { + "required": [ + "inlined" + ] + } + ], + "properties": { + "endpoints": { + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", + "type": "object", + "additionalProperties": true + }, + "exposure": { + "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", + "type": "string", + "enum": [ + "public", + "internal", + "none" + ] + }, + "name": { + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "path": { + "description": "Path of the endpoint URL", + "type": "string" + }, + "protocol": { + "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss", + "tcp", + "udp" + ] + }, + "secure": { + "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", + "type": "boolean" + }, + "targetPort": { + "type": "integer" + } + }, + "additionalProperties": false + } + }, + "inlined": { + "description": "Inlined manifest", + "type": "string" + }, + "uri": { + "description": "Location in a file fetched from a uri.", + "type": "string" + } + }, + "additionalProperties": false + }, + "volume": { + "description": "Allows specifying the definition of a volume shared by several other components", + "type": "object", + "properties": { + "ephemeral": { + "description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false", + "type": "boolean" + }, + "size": { + "description": "Size of the volume", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "id": { + "description": "Id in a registry that contains a Devfile yaml file", + "type": "string" + }, + "kubernetes": { + "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "additionalProperties": false + }, + "projects": { + "description": "Overrides of projects encapsulated in a parent devfile. 
Overriding is done according to K8S strategic merge patch standard rules.", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "oneOf": [ + { + "required": [ + "git" + ] + }, + { + "required": [ + "zip" + ] + } + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "clonePath": { + "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", + "type": "string" + }, + "git": { + "description": "Project's Git source", + "type": "object", + "properties": { + "checkoutFrom": { + "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", + "type": "object", + "properties": { + "remote": { + "description": "The remote name should be used as init. Required if there are more than one remote configured", + "type": "string" + }, + "revision": { + "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", + "type": "string" + } + }, + "additionalProperties": false + }, + "remotes": { + "description": "The remotes map which should be initialized in the git project. 
Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "name": { + "description": "Project name", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "zip": { + "description": "Project's Zip source", + "type": "object", + "properties": { + "location": { + "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "registryUrl": { + "description": "Registry URL to pull the parent devfile from when using id in the parent reference. To ensure the parent devfile gets resolved consistently in different environments, it is recommended to always specify the 'registryUrl' when 'id' is used.", + "type": "string" + }, + "starterProjects": { + "description": "Overrides of starterProjects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "oneOf": [ + { + "required": [ + "git" + ] + }, + { + "required": [ + "zip" + ] + } + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "description": { + "description": "Description of a starter project", + "type": "string" + }, + "git": { + "description": "Project's Git source", + "type": "object", + "properties": { + "checkoutFrom": { + "description": "Defines from what the project should be checked out. 
Required if there are more than one remote configured", + "type": "object", + "properties": { + "remote": { + "description": "The remote name should be used as init. Required if there are more than one remote configured", + "type": "string" + }, + "revision": { + "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", + "type": "string" + } + }, + "additionalProperties": false + }, + "remotes": { + "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "name": { + "description": "Project name", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "subDir": { + "description": "Sub-directory from a starter project to be used as root for starter project.", + "type": "string" + }, + "zip": { + "description": "Project's Zip source", + "type": "object", + "properties": { + "location": { + "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "uri": { + "description": "URI Reference of a parent devfile YAML file. It can be a full URL or a relative URI with the current devfile as the base URI.", + "type": "string" + }, + "variables": { + "description": "Overrides of variables encapsulated in a parent devfile. 
Overriding is done according to K8S strategic merge patch standard rules.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "projects": { + "description": "Projects worked on in the devworkspace, containing names and sources locations", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "oneOf": [ + { + "required": [ + "git" + ] + }, + { + "required": [ + "zip" + ] + } + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "clonePath": { + "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", + "type": "string" + }, + "git": { + "description": "Project's Git source", + "type": "object", + "required": [ + "remotes" + ], + "properties": { + "checkoutFrom": { + "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", + "type": "object", + "properties": { + "remote": { + "description": "The remote name should be used as init. Required if there are more than one remote configured", + "type": "string" + }, + "revision": { + "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", + "type": "string" + } + }, + "additionalProperties": false + }, + "remotes": { + "description": "The remotes map which should be initialized in the git project. 
Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "name": { + "description": "Project name", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "zip": { + "description": "Project's Zip source", + "type": "object", + "properties": { + "location": { + "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "schemaVersion": { + "description": "Devfile schema version", + "type": "string", + "pattern": "^([2-9])\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" + }, + "starterProjects": { + "description": "StarterProjects is a project that can be used as a starting point when bootstrapping new projects", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "oneOf": [ + { + "required": [ + "git" + ] + }, + { + "required": [ + "zip" + ] + } + ], + "properties": { + "attributes": { + "description": "Map of implementation-dependant free-form YAML attributes.", + "type": "object", + "additionalProperties": true + }, + "description": { + "description": "Description of a starter project", + "type": "string" + }, + "git": { + "description": "Project's Git source", + "type": "object", + "required": [ + "remotes" + ], + "properties": { + "checkoutFrom": { + "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", + "type": "object", + "properties": { + "remote": { + "description": "The remote name should be used as init. 
Required if there are more than one remote configured", + "type": "string" + }, + "revision": { + "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", + "type": "string" + } + }, + "additionalProperties": false + }, + "remotes": { + "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "name": { + "description": "Project name", + "type": "string", + "maxLength": 63, + "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" + }, + "subDir": { + "description": "Sub-directory from a starter project to be used as root for starter project.", + "type": "string" + }, + "zip": { + "description": "Project's Zip source", + "type": "object", + "properties": { + "location": { + "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "variables": { + "description": "Map of key-value variables used for string replacement in the devfile. Values can be referenced via {{variable-key}} to replace the corresponding value in string fields in the devfile. Replacement cannot be used for\n\n - schemaVersion, metadata, parent source\n\n - element identifiers, e.g. command id, component name, endpoint name, project name\n\n - references to identifiers, e.g. in events, a command's component, container's volume mount name\n\n - string enums, e.g. 
command group kind, endpoint exposure", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false +} +` diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/attributes.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/attributes.go new file mode 100644 index 00000000000..d241a1b5b05 --- /dev/null +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/attributes.go @@ -0,0 +1,52 @@ +package v2 + +import ( + "fmt" + + "github.com/devfile/api/v2/pkg/attributes" +) + +// GetAttributes gets the devfile top level attributes +func (d *DevfileV2) GetAttributes() (attributes.Attributes, error) { + // This feature was introduced in 2.1.0; so any version 2.1.0 and up should use the 2.1.0 implementation + switch d.SchemaVersion { + case "2.0.0": + return attributes.Attributes{}, fmt.Errorf("top-level attributes is not supported in devfile schema version 2.0.0") + default: + return d.Attributes, nil + } +} + +// UpdateAttributes updates the devfile top level attribute for the specific key, err out if key is absent +func (d *DevfileV2) UpdateAttributes(key string, value interface{}) error { + var err error + + // This feature was introduced in 2.1.0; so any version 2.1.0 and up should use the 2.1.0 implementation + switch d.SchemaVersion { + case "2.0.0": + return fmt.Errorf("top-level attributes is not supported in devfile schema version 2.0.0") + default: + if d.Attributes.Exists(key) { + d.Attributes.Put(key, value, &err) + } else { + return fmt.Errorf("cannot update top-level attribute, key %s is not present", key) + } + } + + return err +} + +// AddAttributes adds to the devfile top level attributes, value will be overwritten if key is already present +func (d *DevfileV2) AddAttributes(key string, value interface{}) error { + var err error + + // This feature was introduced in 2.1.0; so any version 2.1.0 and up should use the 2.1.0 implementation + switch d.SchemaVersion { 
+ case "2.0.0": + return fmt.Errorf("top-level attributes is not supported in devfile schema version 2.0.0") + default: + d.Attributes.Put(key, value, &err) + } + + return err +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/commands.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/commands.go index d308713c19e..42a1b6a12a1 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/commands.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/commands.go @@ -1,59 +1,102 @@ package v2 import ( - "strings" - + "fmt" v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/library/pkg/devfile/parser/data/v2/common" + "reflect" + "strings" ) // GetCommands returns the slice of Command objects parsed from the Devfile func (d *DevfileV2) GetCommands(options common.DevfileOptions) ([]v1.Command, error) { - if len(options.Filter) == 0 { + + if reflect.DeepEqual(options, common.DevfileOptions{}) { return d.Commands, nil } var commands []v1.Command for _, command := range d.Commands { + // Filter Command Attributes filterIn, err := common.FilterDevfileObject(command.Attributes, options) if err != nil { return nil, err + } else if !filterIn { + continue } - if filterIn { - command.Id = strings.ToLower(command.Id) - commands = append(commands, command) + // Filter Command Type - Exec, Composite, etc. + commandType, err := common.GetCommandType(command) + if err != nil { + return nil, err } + if options.CommandOptions.CommandType != "" && commandType != options.CommandOptions.CommandType { + continue + } + + // Filter Command Group Kind - Run, Build, etc. + commandGroup := common.GetGroup(command) + // exclude conditions: + // 1. options group is present and command group is present but does not match + // 2. 
options group is present and command group is not present + if options.CommandOptions.CommandGroupKind != "" && ((commandGroup != nil && options.CommandOptions.CommandGroupKind != commandGroup.Kind) || commandGroup == nil) { + continue + } + + commands = append(commands, command) } return commands, nil } // AddCommands adds the slice of Command objects to the Devfile's commands -// if a command is already defined, error out -func (d *DevfileV2) AddCommands(commands ...v1.Command) error { - devfileCommands, err := d.GetCommands(common.DevfileOptions{}) - if err != nil { - return err - } - +// a command is considered as invalid if it is already defined +// command list passed in will be all processed, and returns a total error of all invalid commands +func (d *DevfileV2) AddCommands(commands []v1.Command) error { + var errorsList []string for _, command := range commands { - for _, devfileCommand := range devfileCommands { + var err error + for _, devfileCommand := range d.Commands { if command.Id == devfileCommand.Id { - return &common.FieldAlreadyExistError{Name: command.Id, Field: "command"} + err = &common.FieldAlreadyExistError{Name: command.Id, Field: "command"} + errorsList = append(errorsList, err.Error()) + break } } - d.Commands = append(d.Commands, command) + if err == nil { + d.Commands = append(d.Commands, command) + } + } + if len(errorsList) > 0 { + return fmt.Errorf("errors while adding commands:\n%s", strings.Join(errorsList, "\n")) } return nil } // UpdateCommand updates the command with the given id -func (d *DevfileV2) UpdateCommand(command v1.Command) { +// return an error if the command is not found +func (d *DevfileV2) UpdateCommand(command v1.Command) error { for i := range d.Commands { - if strings.ToLower(d.Commands[i].Id) == strings.ToLower(command.Id) { + if d.Commands[i].Id == command.Id { d.Commands[i] = command - d.Commands[i].Id = strings.ToLower(d.Commands[i].Id) + return nil } } + return fmt.Errorf("update command failed: command %s 
not found", command.Id) +} + +// DeleteCommand removes the specified command +func (d *DevfileV2) DeleteCommand(id string) error { + + for i := range d.Commands { + if d.Commands[i].Id == id { + d.Commands = append(d.Commands[:i], d.Commands[i+1:]...) + return nil + } + } + + return &common.FieldNotFoundError{ + Field: "command", + Name: id, + } } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/command_helper.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/command_helper.go index 728ce29dad1..b3bf8a0addd 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/command_helper.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/command_helper.go @@ -1,6 +1,8 @@ package common import ( + "fmt" + v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" ) @@ -13,10 +15,6 @@ func GetGroup(dc v1.Command) *v1.CommandGroup { return dc.Exec.Group case dc.Apply != nil: return dc.Apply.Group - case dc.VscodeLaunch != nil: - return dc.VscodeLaunch.Group - case dc.VscodeTask != nil: - return dc.VscodeTask.Group case dc.Custom != nil: return dc.Custom.Group @@ -51,3 +49,57 @@ func GetExecWorkingDir(dc v1.Command) string { return "" } + +// GetApplyComponent returns the component of the apply command +func GetApplyComponent(dc v1.Command) string { + if dc.Apply != nil { + return dc.Apply.Component + } + + return "" +} + +// GetCommandType returns the command type of a given command +func GetCommandType(command v1.Command) (v1.CommandType, error) { + switch { + case command.Apply != nil: + return v1.ApplyCommandType, nil + case command.Composite != nil: + return v1.CompositeCommandType, nil + case command.Exec != nil: + return v1.ExecCommandType, nil + case command.Custom != nil: + return v1.CustomCommandType, nil + + default: + return "", fmt.Errorf("unknown command type") + } +} + +// GetCommandsMap returns a map of the command Id to the command +func 
GetCommandsMap(commands []v1.Command) map[string]v1.Command { + commandMap := make(map[string]v1.Command, len(commands)) + for _, command := range commands { + commandMap[command.Id] = command + } + return commandMap +} + +// GetCommandsFromEvent returns the list of commands from the event name. +// If the event is a composite command, it returns the sub-commands from the tree +func GetCommandsFromEvent(commandsMap map[string]v1.Command, eventName string) []string { + var commands []string + + if command, ok := commandsMap[eventName]; ok { + if command.Composite != nil { + for _, compositeSubCmd := range command.Composite.Commands { + subCommands := GetCommandsFromEvent(commandsMap, compositeSubCmd) + commands = append(commands, subCommands...) + } + } else { + commands = append(commands, command.Id) + } + } + + return commands +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/component_helper.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/component_helper.go index dede54285d4..1a5ba3323ca 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/component_helper.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/component_helper.go @@ -29,6 +29,8 @@ func GetComponentType(component v1.Component) (v1.ComponentType, error) { return v1.KubernetesComponentType, nil case component.Openshift != nil: return v1.OpenshiftComponentType, nil + case component.Image != nil: + return v1.ImageComponentType, nil case component.Custom != nil: return v1.CustomComponentType, nil diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/options.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/options.go index 14d595d0153..b144315147d 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/options.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/options.go @@ -3,13 +3,46 @@ 
package common import ( "reflect" + v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" apiAttributes "github.com/devfile/api/v2/pkg/attributes" ) // DevfileOptions provides options for Devfile operations type DevfileOptions struct { - // Filter is a map that lets you filter devfile object against their attributes. Interface can be string, float, boolean or a map + // Filter is a map that lets filter devfile object against their attributes. Interface can be string, float, boolean or a map Filter map[string]interface{} + + // CommandOptions specifies the various options available to filter commands + CommandOptions CommandOptions + + // ComponentOptions specifies the various options available to filter components + ComponentOptions ComponentOptions + + // ProjectOptions specifies the various options available to filter projects/starterProjects + ProjectOptions ProjectOptions +} + +// CommandOptions specifies the various options available to filter commands +type CommandOptions struct { + // CommandGroupKind is an option that allows to filter command based on their kind + CommandGroupKind v1.CommandGroupKind + + // CommandType is an option that allows to filter command based on their type + CommandType v1.CommandType +} + +// ComponentOptions specifies the various options available to filter components +type ComponentOptions struct { + + // ComponentType is an option that allows to filter component based on their type + ComponentType v1.ComponentType +} + +// ProjectOptions specifies the various options available to filter projects/starterProjects +type ProjectOptions struct { + + // ProjectSourceType is an option that allows to filter project based on their source type + ProjectSourceType v1.ProjectSourceType } // FilterDevfileObject filters devfile attributes with the given options diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/project_helper.go 
b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/project_helper.go index 4028779b5c6..b8df56e7d4d 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/project_helper.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/common/project_helper.go @@ -41,3 +41,18 @@ func GetDefaultSource(ps v1.GitLikeProjectSource) (remoteName string, remoteURL return remoteName, remoteURL, revision, err } + +// GetProjectSourceType returns the source type of a given project source +func GetProjectSourceType(projectSrc v1.ProjectSource) (v1.ProjectSourceType, error) { + switch { + case projectSrc.Git != nil: + return v1.GitProjectSourceType, nil + case projectSrc.Zip != nil: + return v1.ZipProjectSourceType, nil + case projectSrc.Custom != nil: + return v1.CustomProjectSourceType, nil + + default: + return "", fmt.Errorf("unknown project source type") + } +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/components.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/components.go index 2da15b73606..13a905b6584 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/components.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/components.go @@ -1,32 +1,48 @@ package v2 import ( + "fmt" + "reflect" + "strings" + v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/library/pkg/devfile/parser/data/v2/common" ) // GetComponents returns the slice of Component objects parsed from the Devfile func (d *DevfileV2) GetComponents(options common.DevfileOptions) ([]v1.Component, error) { - if len(options.Filter) == 0 { + + if reflect.DeepEqual(options, common.DevfileOptions{}) { return d.Components, nil } var components []v1.Component - for _, comp := range d.Components { - filterIn, err := common.FilterDevfileObject(comp.Attributes, options) + for _, component := range d.Components { + // Filter Component Attributes + filterIn, 
err := common.FilterDevfileObject(component.Attributes, options) if err != nil { return nil, err + } else if !filterIn { + continue } - if filterIn { - components = append(components, comp) + // Filter Component Type - Container, Volume, etc. + componentType, err := common.GetComponentType(component) + if err != nil { + return nil, err } + if options.ComponentOptions.ComponentType != "" && componentType != options.ComponentOptions.ComponentType { + continue + } + + components = append(components, component) } return components, nil } -// GetDevfileContainerComponents iterates through the components in the devfile and returns a list of devfile container components +// GetDevfileContainerComponents iterates through the components in the devfile and returns a list of devfile container components. +// Deprecated, use GetComponents() with the DevfileOptions. func (d *DevfileV2) GetDevfileContainerComponents(options common.DevfileOptions) ([]v1.Component, error) { var components []v1.Component devfileComponents, err := d.GetComponents(options) @@ -41,7 +57,8 @@ func (d *DevfileV2) GetDevfileContainerComponents(options common.DevfileOptions) return components, nil } -// GetDevfileVolumeComponents iterates through the components in the devfile and returns a list of devfile volume components +// GetDevfileVolumeComponents iterates through the components in the devfile and returns a list of devfile volume components. +// Deprecated, use GetComponents() with the DevfileOptions. 
func (d *DevfileV2) GetDevfileVolumeComponents(options common.DevfileOptions) ([]v1.Component, error) { var components []v1.Component devfileComponents, err := d.GetComponents(options) @@ -57,34 +74,53 @@ func (d *DevfileV2) GetDevfileVolumeComponents(options common.DevfileOptions) ([ } // AddComponents adds the slice of Component objects to the devfile's components -// if a component is already defined, error out +// a component is considered as invalid if it is already defined +// component list passed in will be all processed, and returns a total error of all invalid components func (d *DevfileV2) AddComponents(components []v1.Component) error { - - componentMap := make(map[string]bool) - - for _, component := range d.Components { - componentMap[component.Name] = true - } + var errorsList []string for _, component := range components { - if _, ok := componentMap[component.Name]; !ok { + var err error + for _, devfileComponent := range d.Components { + if component.Name == devfileComponent.Name { + err = &common.FieldAlreadyExistError{Name: component.Name, Field: "component"} + errorsList = append(errorsList, err.Error()) + break + } + } + if err == nil { d.Components = append(d.Components, component) - } else { - return &common.FieldAlreadyExistError{Name: component.Name, Field: "component"} } } + if len(errorsList) > 0 { + return fmt.Errorf("errors while adding components:\n%s", strings.Join(errorsList, "\n")) + } return nil } // UpdateComponent updates the component with the given name -func (d *DevfileV2) UpdateComponent(component v1.Component) { - index := -1 +// return an error if the component is not found +func (d *DevfileV2) UpdateComponent(component v1.Component) error { for i := range d.Components { if d.Components[i].Name == component.Name { - index = i - break + d.Components[i] = component + return nil + } + } + return fmt.Errorf("update component failed: component %s not found", component.Name) +} + +// DeleteComponent removes the specified 
component +func (d *DevfileV2) DeleteComponent(name string) error { + + for i := range d.Components { + if d.Components[i].Name == name { + d.Components = append(d.Components[:i], d.Components[i+1:]...) + return nil } } - if index != -1 { - d.Components[index] = component + + return &common.FieldNotFoundError{ + Field: "component", + Name: name, } } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/events.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/events.go index 621a8d05c94..15c88b00752 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/events.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/events.go @@ -1,8 +1,10 @@ package v2 import ( + "fmt" v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/library/pkg/devfile/parser/data/v2/common" + "strings" ) // GetEvents returns the Events Object parsed from devfile @@ -14,52 +16,69 @@ func (d *DevfileV2) GetEvents() v1.Events { } // AddEvents adds the Events Object to the devfile's events -// if the event is already defined in the devfile, error out +// an event field is considered as invalid if it is already defined +// all event fields will be checked and processed, and returns a total error of all event fields func (d *DevfileV2) AddEvents(events v1.Events) error { + + if d.Events == nil { + d.Events = &v1.Events{} + } + var errorsList []string if len(events.PreStop) > 0 { if len(d.Events.PreStop) > 0 { - return &common.FieldAlreadyExistError{Field: "pre stop"} + errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "pre stop"}).Error()) + } else { + d.Events.PreStop = events.PreStop } - d.Events.PreStop = events.PreStop } if len(events.PreStart) > 0 { if len(d.Events.PreStart) > 0 { - return &common.FieldAlreadyExistError{Field: "pre start"} + errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "pre start"}).Error()) + } 
else { + d.Events.PreStart = events.PreStart } - d.Events.PreStart = events.PreStart } if len(events.PostStop) > 0 { if len(d.Events.PostStop) > 0 { - return &common.FieldAlreadyExistError{Field: "post stop"} + errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "post stop"}).Error()) + } else { + d.Events.PostStop = events.PostStop } - d.Events.PostStop = events.PostStop } if len(events.PostStart) > 0 { if len(d.Events.PostStart) > 0 { - return &common.FieldAlreadyExistError{Field: "post start"} + errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "post start"}).Error()) + } else { + d.Events.PostStart = events.PostStart } - d.Events.PostStart = events.PostStart } - + if len(errorsList) > 0 { + return fmt.Errorf("errors while adding events:\n%s", strings.Join(errorsList, "\n")) + } return nil } // UpdateEvents updates the devfile's events // it only updates the events passed to it func (d *DevfileV2) UpdateEvents(postStart, postStop, preStart, preStop []string) { - if len(postStart) != 0 { + + if d.Events == nil { + d.Events = &v1.Events{} + } + + if postStart != nil { d.Events.PostStart = postStart } - if len(postStop) != 0 { + if postStop != nil { d.Events.PostStop = postStop } - if len(preStart) != 0 { + if preStart != nil { d.Events.PreStart = preStart } - if len(preStop) != 0 { + if preStop != nil { d.Events.PreStop = preStop } } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/header.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/header.go index 6798ec95d40..b005fd447bd 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/header.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/header.go @@ -20,9 +20,6 @@ func (d *DevfileV2) GetMetadata() devfilepkg.DevfileMetadata { } // SetMetadata sets the metadata for devfile -func (d *DevfileV2) SetMetadata(name, version string) { - d.Metadata = 
devfilepkg.DevfileMetadata{ - Name: name, - Version: version, - } +func (d *DevfileV2) SetMetadata(metadata devfilepkg.DevfileMetadata) { + d.Metadata = metadata } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/projects.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/projects.go index 01bee4debc3..c90fb2bff65 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/projects.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/projects.go @@ -1,37 +1,51 @@ package v2 import ( - "strings" - + "fmt" v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/library/pkg/devfile/parser/data/v2/common" + "reflect" + "strings" ) // GetProjects returns the Project Object parsed from devfile func (d *DevfileV2) GetProjects(options common.DevfileOptions) ([]v1.Project, error) { - if len(options.Filter) == 0 { + + if reflect.DeepEqual(options, common.DevfileOptions{}) { return d.Projects, nil } var projects []v1.Project - for _, proj := range d.Projects { - filterIn, err := common.FilterDevfileObject(proj.Attributes, options) + for _, project := range d.Projects { + // Filter Project Attributes + filterIn, err := common.FilterDevfileObject(project.Attributes, options) if err != nil { return nil, err + } else if !filterIn { + continue } - if filterIn { - projects = append(projects, proj) + // Filter Project Source Type - Git, Zip, etc. 
+ projectSourceType, err := common.GetProjectSourceType(project.ProjectSource) + if err != nil { + return nil, err + } + if options.ProjectOptions.ProjectSourceType != "" && projectSourceType != options.ProjectOptions.ProjectSourceType { + continue } + + projects = append(projects, project) } return projects, nil } // AddProjects adss the slice of Devfile projects to the Devfile's project list -// if a project is already defined, error out +// a project is considered as invalid if it is already defined +// project list passed in will be all processed, and returns a total error of all invalid projects func (d *DevfileV2) AddProjects(projects []v1.Project) error { projectsMap := make(map[string]bool) + var errorsList []string for _, project := range d.Projects { projectsMap[project.Name] = true } @@ -40,46 +54,82 @@ func (d *DevfileV2) AddProjects(projects []v1.Project) error { if _, ok := projectsMap[project.Name]; !ok { d.Projects = append(d.Projects, project) } else { - return &common.FieldAlreadyExistError{Name: project.Name, Field: "project"} + errorsList = append(errorsList, (&common.FieldAlreadyExistError{Name: project.Name, Field: "project"}).Error()) + continue } } + if len(errorsList) > 0 { + return fmt.Errorf("errors while adding projects:\n%s", strings.Join(errorsList, "\n")) + } return nil } // UpdateProject updates the slice of Devfile projects parsed from the Devfile -func (d *DevfileV2) UpdateProject(project v1.Project) { +// return an error if the project is not found +func (d *DevfileV2) UpdateProject(project v1.Project) error { for i := range d.Projects { - if d.Projects[i].Name == strings.ToLower(project.Name) { + if d.Projects[i].Name == project.Name { d.Projects[i] = project + return nil + } + } + return fmt.Errorf("update project failed: project %s not found", project.Name) +} + +// DeleteProject removes the specified project +func (d *DevfileV2) DeleteProject(name string) error { + + for i := range d.Projects { + if d.Projects[i].Name == name 
{ + d.Projects = append(d.Projects[:i], d.Projects[i+1:]...) + return nil } } + + return &common.FieldNotFoundError{ + Field: "project", + Name: name, + } } //GetStarterProjects returns the DevfileStarterProject parsed from devfile func (d *DevfileV2) GetStarterProjects(options common.DevfileOptions) ([]v1.StarterProject, error) { - if len(options.Filter) == 0 { + + if reflect.DeepEqual(options, common.DevfileOptions{}) { return d.StarterProjects, nil } var starterProjects []v1.StarterProject - for _, starterProj := range d.StarterProjects { - filterIn, err := common.FilterDevfileObject(starterProj.Attributes, options) + for _, starterProject := range d.StarterProjects { + // Filter Starter Project Attributes + filterIn, err := common.FilterDevfileObject(starterProject.Attributes, options) if err != nil { return nil, err + } else if !filterIn { + continue } - if filterIn { - starterProjects = append(starterProjects, starterProj) + // Filter Starter Project Source Type - Git, Zip, etc. + starterProjectSourceType, err := common.GetProjectSourceType(starterProject.ProjectSource) + if err != nil { + return nil, err + } + if options.ProjectOptions.ProjectSourceType != "" && starterProjectSourceType != options.ProjectOptions.ProjectSourceType { + continue } + + starterProjects = append(starterProjects, starterProject) } return starterProjects, nil } // AddStarterProjects adds the slice of Devfile starter projects to the Devfile's starter project list -// if a starter project is already defined, error out +// a starterProject is considered as invalid if it is already defined +// starterProject list passed in will be all processed, and returns a total error of all invalid starterProjects func (d *DevfileV2) AddStarterProjects(projects []v1.StarterProject) error { projectsMap := make(map[string]bool) + var errorsList []string for _, project := range d.StarterProjects { projectsMap[project.Name] = true } @@ -88,17 +138,39 @@ func (d *DevfileV2) AddStarterProjects(projects 
[]v1.StarterProject) error { if _, ok := projectsMap[project.Name]; !ok { d.StarterProjects = append(d.StarterProjects, project) } else { - return &common.FieldAlreadyExistError{Name: project.Name, Field: "starterProject"} + errorsList = append(errorsList, (&common.FieldAlreadyExistError{Name: project.Name, Field: "starterProject"}).Error()) + continue } } + if len(errorsList) > 0 { + return fmt.Errorf("errors while adding starterProjects:\n%s", strings.Join(errorsList, "\n")) + } return nil } // UpdateStarterProject updates the slice of Devfile starter projects parsed from the Devfile -func (d *DevfileV2) UpdateStarterProject(project v1.StarterProject) { +func (d *DevfileV2) UpdateStarterProject(project v1.StarterProject) error { for i := range d.StarterProjects { - if d.StarterProjects[i].Name == strings.ToLower(project.Name) { + if d.StarterProjects[i].Name == project.Name { d.StarterProjects[i] = project + return nil } } + return fmt.Errorf("update starter project failed: starter project %s not found", project.Name) +} + +// DeleteStarterProject removes the specified starter project +func (d *DevfileV2) DeleteStarterProject(name string) error { + + for i := range d.StarterProjects { + if d.StarterProjects[i].Name == name { + d.StarterProjects = append(d.StarterProjects[:i], d.StarterProjects[i+1:]...) 
+ return nil + } + } + + return &common.FieldNotFoundError{ + Field: "starter project", + Name: name, + } } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/volumes.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/volumes.go index 2a3380069ee..37a29fc70d2 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/volumes.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/volumes.go @@ -8,67 +8,61 @@ import ( "github.com/devfile/library/pkg/devfile/parser/data/v2/common" ) -// AddVolume adds the volume to the devFile and mounts it to all the container components -func (d *DevfileV2) AddVolume(volumeComponent v1.Component, path string) error { - volumeExists := false +// AddVolumeMounts adds the volume mounts to the specified container component +func (d *DevfileV2) AddVolumeMounts(containerName string, volumeMounts []v1.VolumeMount) error { var pathErrorContainers []string + found := false for _, component := range d.Components { - if component.Container != nil { - for _, volumeMount := range component.Container.VolumeMounts { - if volumeMount.Path == path { - var err = fmt.Errorf("another volume, %s, is mounted to the same path: %s, on the container: %s", volumeMount.Name, path, component.Name) - pathErrorContainers = append(pathErrorContainers, err.Error()) + if component.Container != nil && component.Name == containerName { + found = true + for _, devfileVolumeMount := range component.Container.VolumeMounts { + for _, volumeMount := range volumeMounts { + if devfileVolumeMount.Path == volumeMount.Path { + pathErrorContainers = append(pathErrorContainers, fmt.Sprintf("unable to mount volume %s, as another volume %s is mounted to the same path %s in the container %s", volumeMount.Name, devfileVolumeMount.Name, volumeMount.Path, component.Name)) + } } } - component.Container.VolumeMounts = append(component.Container.VolumeMounts, v1.VolumeMount{ - Name: volumeComponent.Name, - Path: path, - 
}) - } else if component.Volume != nil && component.Name == volumeComponent.Name { - volumeExists = true - break + if len(pathErrorContainers) == 0 { + component.Container.VolumeMounts = append(component.Container.VolumeMounts, volumeMounts...) + } } } - if volumeExists { - return &common.FieldAlreadyExistError{ - Field: "volume", - Name: volumeComponent.Name, + if !found { + return &common.FieldNotFoundError{ + Field: "container component", + Name: containerName, } } if len(pathErrorContainers) > 0 { - return fmt.Errorf("errors while creating volume:\n%s", strings.Join(pathErrorContainers, "\n")) + return fmt.Errorf("errors while adding volume mounts:\n%s", strings.Join(pathErrorContainers, "\n")) } - d.Components = append(d.Components, volumeComponent) - return nil } -// DeleteVolume removes the volume from the devFile and removes all the related volume mounts -func (d *DevfileV2) DeleteVolume(name string) error { +// DeleteVolumeMount deletes the volume mount from container components +func (d *DevfileV2) DeleteVolumeMount(name string) error { found := false - for i := len(d.Components) - 1; i >= 0; i-- { - if d.Components[i].Container != nil { - var tmp []v1.VolumeMount - for _, volumeMount := range d.Components[i].Container.VolumeMounts { - if volumeMount.Name != name { - tmp = append(tmp, volumeMount) + for i := range d.Components { + if d.Components[i].Container != nil && d.Components[i].Name != name { + // Volume Mounts can have multiple instances of a volume mounted at different paths + // As arrays are rearraged/shifted for deletion, we lose one element every time there is a match + // Looping backward is efficient, otherwise we would have to manually decrement counter + // if we looped forward + for j := len(d.Components[i].Container.VolumeMounts) - 1; j >= 0; j-- { + if d.Components[i].Container.VolumeMounts[j].Name == name { + found = true + d.Components[i].Container.VolumeMounts = append(d.Components[i].Container.VolumeMounts[:j], 
d.Components[i].Container.VolumeMounts[j+1:]...) } } - d.Components[i].Container.VolumeMounts = tmp - } else if d.Components[i].Volume != nil { - if d.Components[i].Name == name { - found = true - d.Components = append(d.Components[:i], d.Components[i+1:]...) - } } } if !found { return &common.FieldNotFoundError{ - Field: "volume", + Field: "volume mount", Name: name, } } @@ -76,31 +70,33 @@ func (d *DevfileV2) DeleteVolume(name string) error { return nil } -// GetVolumeMountPath gets the mount path of the required volume -func (d *DevfileV2) GetVolumeMountPath(name string) (string, error) { - volumeFound := false - mountFound := false - path := "" +// GetVolumeMountPaths gets all the mount paths of the specified volume mount from the specified container component. +// A container can mount at different paths for a given volume. +func (d *DevfileV2) GetVolumeMountPaths(mountName, containerName string) ([]string, error) { + componentFound := false + var mountPaths []string for _, component := range d.Components { - if component.Container != nil { + if component.Container != nil && component.Name == containerName { + componentFound = true for _, volumeMount := range component.Container.VolumeMounts { - if volumeMount.Name == name { - mountFound = true - path = volumeMount.Path + if volumeMount.Name == mountName { + mountPaths = append(mountPaths, volumeMount.Path) } } - } else if component.Volume != nil { - volumeFound = true } } - if volumeFound && mountFound { - return path, nil - } else if !mountFound && volumeFound { - return "", fmt.Errorf("volume not mounted to any component") + + if !componentFound { + return mountPaths, &common.FieldNotFoundError{ + Field: "container component", + Name: containerName, + } } - return "", &common.FieldNotFoundError{ - Field: "volume", - Name: "name", + + if len(mountPaths) == 0 { + return mountPaths, fmt.Errorf("volume %s not mounted to component %s", mountName, containerName) } + + return mountPaths, nil } diff --git 
a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/workspace.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/workspace.go index e73b8d47633..6cf94d56b6f 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/workspace.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/v2/workspace.go @@ -4,13 +4,22 @@ import ( v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" ) -// GetDevfileWorkspace returns the workspace content for the devfile -func (d *DevfileV2) GetDevfileWorkspace() *v1.DevWorkspaceTemplateSpecContent { +// GetDevfileWorkspaceSpecContent returns the workspace spec content for the devfile +func (d *DevfileV2) GetDevfileWorkspaceSpecContent() *v1.DevWorkspaceTemplateSpecContent { return &d.DevWorkspaceTemplateSpecContent } -// SetDevfileWorkspace sets the workspace content -func (d *DevfileV2) SetDevfileWorkspace(content v1.DevWorkspaceTemplateSpecContent) { +// SetDevfileWorkspaceSpecContent sets the workspace spec content +func (d *DevfileV2) SetDevfileWorkspaceSpecContent(content v1.DevWorkspaceTemplateSpecContent) { d.DevWorkspaceTemplateSpecContent = content } + +func (d *DevfileV2) GetDevfileWorkspaceSpec() *v1.DevWorkspaceTemplateSpec { + return &d.DevWorkspaceTemplateSpec +} + +// SetDevfileWorkspaceSpec sets the workspace spec +func (d *DevfileV2) SetDevfileWorkspaceSpec(spec v1.DevWorkspaceTemplateSpec) { + d.DevWorkspaceTemplateSpec = spec +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/data/versions.go b/vendor/github.com/devfile/library/pkg/devfile/parser/data/versions.go index c6cecc63f37..e85a026e27e 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/data/versions.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/data/versions.go @@ -6,6 +6,7 @@ import ( v2 "github.com/devfile/library/pkg/devfile/parser/data/v2" v200 "github.com/devfile/library/pkg/devfile/parser/data/v2/2.0.0" v210 
"github.com/devfile/library/pkg/devfile/parser/data/v2/2.1.0" + v220 "github.com/devfile/library/pkg/devfile/parser/data/v2/2.2.0" ) // SupportedApiVersions stores the supported devfile API versions @@ -13,8 +14,10 @@ type supportedApiVersion string // Supported devfile API versions const ( - APIVersion200 supportedApiVersion = "2.0.0" - APIVersion210 supportedApiVersion = "2.1.0" + APISchemaVersion200 supportedApiVersion = "2.0.0" + APISchemaVersion210 supportedApiVersion = "2.1.0" + APISchemaVersion220 supportedApiVersion = "2.2.0" + APIVersionAlpha2 supportedApiVersion = "v1alpha2" ) // ------------- Init functions ------------- // @@ -25,8 +28,10 @@ var apiVersionToDevfileStruct map[supportedApiVersion]reflect.Type // Initializes a map of supported devfile api versions and devfile structs func init() { apiVersionToDevfileStruct = make(map[supportedApiVersion]reflect.Type) - apiVersionToDevfileStruct[APIVersion200] = reflect.TypeOf(v2.DevfileV2{}) - apiVersionToDevfileStruct[APIVersion210] = reflect.TypeOf(v2.DevfileV2{}) + apiVersionToDevfileStruct[APISchemaVersion200] = reflect.TypeOf(v2.DevfileV2{}) + apiVersionToDevfileStruct[APISchemaVersion210] = reflect.TypeOf(v2.DevfileV2{}) + apiVersionToDevfileStruct[APISchemaVersion220] = reflect.TypeOf(v2.DevfileV2{}) + apiVersionToDevfileStruct[APIVersionAlpha2] = reflect.TypeOf(v2.DevfileV2{}) } // Map to store mappings between supported devfile API versions and respective devfile JSON schemas @@ -35,6 +40,9 @@ var devfileApiVersionToJSONSchema map[supportedApiVersion]string // init initializes a map of supported devfile apiVersions with it's respective devfile JSON schema func init() { devfileApiVersionToJSONSchema = make(map[supportedApiVersion]string) - devfileApiVersionToJSONSchema[APIVersion200] = v200.JsonSchema200 - devfileApiVersionToJSONSchema[APIVersion210] = v210.JsonSchema210 + devfileApiVersionToJSONSchema[APISchemaVersion200] = v200.JsonSchema200 + devfileApiVersionToJSONSchema[APISchemaVersion210] = 
v210.JsonSchema210 + devfileApiVersionToJSONSchema[APISchemaVersion220] = v220.JsonSchema220 + // should use hightest v2 schema version since it is expected to be backward compatible with the same api version + devfileApiVersionToJSONSchema[APIVersionAlpha2] = v220.JsonSchema220 } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/devfileobj.go b/vendor/github.com/devfile/library/pkg/devfile/parser/devfileobj.go index f7ee33e9ee2..65c669c8c1e 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/devfileobj.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/devfileobj.go @@ -7,7 +7,6 @@ import ( // Default filenames for create devfile const ( - OutputDevfileJsonPath = "devfile.json" OutputDevfileYamlPath = "devfile.yaml" ) diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/parse.go b/vendor/github.com/devfile/library/pkg/devfile/parser/parse.go index a86459b4d36..e9a12aa2a89 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/parse.go +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/parse.go @@ -1,10 +1,15 @@ package parser import ( + "context" "encoding/json" "fmt" + "github.com/devfile/library/pkg/util" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" "net/url" "path" + "sigs.k8s.io/controller-runtime/pkg/client" "strings" devfileCtx "github.com/devfile/library/pkg/devfile/parser/context" @@ -22,7 +27,7 @@ import ( // ParseDevfile func validates the devfile integrity. 
// Creates devfile context and runtime objects -func parseDevfile(d DevfileObj, flattenedDevfile bool) (DevfileObj, error) { +func parseDevfile(d DevfileObj, resolveCtx *resolutionContextTree, tool resolverTools, flattenedDevfile bool) (DevfileObj, error) { // Validate devfile err := d.Ctx.Validate() @@ -43,92 +48,182 @@ func parseDevfile(d DevfileObj, flattenedDevfile bool) (DevfileObj, error) { } if flattenedDevfile { - err = parseParentAndPlugin(d) + err = parseParentAndPlugin(d, resolveCtx, tool) if err != nil { return DevfileObj{}, err } } - for uri := range devfileCtx.URIMap { - delete(devfileCtx.URIMap, uri) - } + // Successful return d, nil } -// Parse func populates the flattened devfile data, parses and validates the devfile integrity. +// ParserArgs is the struct to pass into parser functions which contains required info for parsing devfile. +// It accepts devfile path, devfile URL or devfile content in []byte format. +type ParserArgs struct { + // Path is a relative or absolute devfile path. + Path string + // URL is the URL address of the specific devfile. + URL string + // Data is the devfile content in []byte format. + Data []byte + // FlattenedDevfile defines if the returned devfileObj is flattened content (true) or raw content (false). + // The value is default to be true. + FlattenedDevfile *bool + // RegistryURLs is a list of registry hosts which parser should pull parent devfile from. + // If registryUrl is defined in devfile, this list will be ignored. + RegistryURLs []string + // DefaultNamespace is the default namespace to use + // If namespace is defined under devfile's parent kubernetes object, this namespace will be ignored. 
+ DefaultNamespace string + // Context is the context used for making Kubernetes requests + Context context.Context + // K8sClient is the Kubernetes client instance used for interacting with a cluster + K8sClient client.Client +} + +// ParseDevfile func populates the devfile data, parses and validates the devfile integrity. // Creates devfile context and runtime objects -func Parse(path string) (d DevfileObj, err error) { +func ParseDevfile(args ParserArgs) (d DevfileObj, err error) { + if args.Data != nil { + d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(args.Data) + if err != nil { + return d, errors.Wrap(err, "failed to set devfile content from bytes") + } + } else if args.Path != "" { + d.Ctx = devfileCtx.NewDevfileCtx(args.Path) + } else if args.URL != "" { + d.Ctx = devfileCtx.NewURLDevfileCtx(args.URL) + } else { + return d, errors.Wrap(err, "the devfile source is not provided") + } - // NewDevfileCtx - d.Ctx = devfileCtx.NewDevfileCtx(path) + tool := resolverTools{ + defaultNamespace: args.DefaultNamespace, + registryURLs: args.RegistryURLs, + context: args.Context, + k8sClient: args.K8sClient, + } + + flattenedDevfile := true + if args.FlattenedDevfile != nil { + flattenedDevfile = *args.FlattenedDevfile + } + + d, err = populateAndParseDevfile(d, &resolutionContextTree{}, tool, flattenedDevfile) + + //set defaults only if we are flattening parent and parsing succeeded + if flattenedDevfile && err == nil { + setDefaults(d) + } + + return d, err +} + +// resolverTools contains required structs and data for resolving remote components of a devfile (plugins and parents) +type resolverTools struct { + // DefaultNamespace is the default namespace to use for resolving Kubernetes ImportReferences that do not include one + defaultNamespace string + // RegistryURLs is a list of registry hosts which parser should pull parent devfile from. + // If registryUrl is defined in devfile, this list will be ignored. 
+ registryURLs []string + // Context is the context used for making Kubernetes or HTTP requests + context context.Context + // K8sClient is the Kubernetes client instance used for interacting with a cluster + k8sClient client.Client +} +func populateAndParseDevfile(d DevfileObj, resolveCtx *resolutionContextTree, tool resolverTools, flattenedDevfile bool) (DevfileObj, error) { + var err error + if err = resolveCtx.hasCycle(); err != nil { + return DevfileObj{}, err + } // Fill the fields of DevfileCtx struct - err = d.Ctx.Populate() + if d.Ctx.GetURL() != "" { + err = d.Ctx.PopulateFromURL() + } else if d.Ctx.GetDevfileContent() != nil { + err = d.Ctx.PopulateFromRaw() + } else { + err = d.Ctx.Populate() + } if err != nil { return d, err } - return parseDevfile(d, true) + + return parseDevfile(d, resolveCtx, tool, flattenedDevfile) +} + +// Parse func populates the flattened devfile data, parses and validates the devfile integrity. +// Creates devfile context and runtime objects +// Deprecated, use ParseDevfile() instead +func Parse(path string) (d DevfileObj, err error) { + + // NewDevfileCtx + d.Ctx = devfileCtx.NewDevfileCtx(path) + return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, true) } // ParseRawDevfile populates the raw devfile data without overriding and merging +// Deprecated, use ParseDevfile() instead func ParseRawDevfile(path string) (d DevfileObj, err error) { // NewDevfileCtx d.Ctx = devfileCtx.NewDevfileCtx(path) - - // Fill the fields of DevfileCtx struct - err = d.Ctx.Populate() - if err != nil { - return d, err - } - return parseDevfile(d, false) + return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, false) } // ParseFromURL func parses and validates the devfile integrity. 
// Creates devfile context and runtime objects +// Deprecated, use ParseDevfile() instead func ParseFromURL(url string) (d DevfileObj, err error) { d.Ctx = devfileCtx.NewURLDevfileCtx(url) - // Fill the fields of DevfileCtx struct - err = d.Ctx.PopulateFromURL() - if err != nil { - return d, err - } - return parseDevfile(d, true) + return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, true) } // ParseFromData func parses and validates the devfile integrity. // Creates devfile context and runtime objects +// Deprecated, use ParseDevfile() instead func ParseFromData(data []byte) (d DevfileObj, err error) { - d.Ctx = devfileCtx.DevfileCtx{} - err = d.Ctx.SetDevfileContentFromBytes(data) + d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(data) if err != nil { return d, errors.Wrap(err, "failed to set devfile content from bytes") } - err = d.Ctx.PopulateFromRaw() - if err != nil { - return d, err - } - - return parseDevfile(d, true) + return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, true) } -func parseParentAndPlugin(d DevfileObj) (err error) { +func parseParentAndPlugin(d DevfileObj, resolveCtx *resolutionContextTree, tool resolverTools) (err error) { flattenedParent := &v1.DevWorkspaceTemplateSpecContent{} - if d.Data.GetParent() != nil { - if !reflect.DeepEqual(d.Data.GetParent(), &v1.Parent{}) { + parent := d.Data.GetParent() + if parent != nil { + if !reflect.DeepEqual(parent, &v1.Parent{}) { - parent := d.Data.GetParent() var parentDevfileObj DevfileObj - if d.Data.GetParent().Uri != "" { - parentDevfileObj, err = parseFromURI(parent.Uri, d.Ctx) - if err != nil { - return err - } - } else { - return fmt.Errorf("parent URI undefined, currently only URI is suppported") + switch { + case parent.Uri != "": + parentDevfileObj, err = parseFromURI(parent.ImportReference, d.Ctx, resolveCtx, tool) + case parent.Id != "": + parentDevfileObj, err = parseFromRegistry(parent.ImportReference, resolveCtx, tool) + case 
parent.Kubernetes != nil: + parentDevfileObj, err = parseFromKubeCRD(parent.ImportReference, resolveCtx, tool) + default: + return fmt.Errorf("devfile parent does not define any resources") + } + if err != nil { + return err } - parentWorkspaceContent := parentDevfileObj.Data.GetDevfileWorkspace() + parentWorkspaceContent := parentDevfileObj.Data.GetDevfileWorkspaceSpecContent() + // add attribute to parent elements + err = addSourceAttributesForOverrideAndMerge(parent.ImportReference, parentWorkspaceContent) + if err != nil { + return err + } if !reflect.DeepEqual(parent.ParentOverrides, v1.ParentOverrides{}) { + // add attribute to parentOverrides elements + curNodeImportReference := resolveCtx.importReference + err = addSourceAttributesForOverrideAndMerge(curNodeImportReference, &parent.ParentOverrides) + if err != nil { + return err + } flattenedParent, err = apiOverride.OverrideDevWorkspaceTemplateSpec(parentWorkspaceContent, parent.ParentOverrides) if err != nil { return err @@ -140,6 +235,7 @@ func parseParentAndPlugin(d DevfileObj) (err error) { klog.V(4).Infof("adding data of devfile with URI: %v", parent.Uri) } } + flattenedPlugins := []*v1.DevWorkspaceTemplateSpecContent{} components, err := d.Data.GetComponents(common.DevfileOptions{}) if err != nil { @@ -149,17 +245,33 @@ func parseParentAndPlugin(d DevfileObj) (err error) { if component.Plugin != nil && !reflect.DeepEqual(component.Plugin, &v1.PluginComponent{}) { plugin := component.Plugin var pluginDevfileObj DevfileObj - if plugin.Uri != "" { - pluginDevfileObj, err = parseFromURI(plugin.Uri, d.Ctx) - if err != nil { - return err - } - } else { - return fmt.Errorf("plugin URI undefined, currently only URI is suppported") + switch { + case plugin.Uri != "": + pluginDevfileObj, err = parseFromURI(plugin.ImportReference, d.Ctx, resolveCtx, tool) + case plugin.Id != "": + pluginDevfileObj, err = parseFromRegistry(plugin.ImportReference, resolveCtx, tool) + case plugin.Kubernetes != nil: + 
pluginDevfileObj, err = parseFromKubeCRD(plugin.ImportReference, resolveCtx, tool) + default: + return fmt.Errorf("plugin %s does not define any resources", component.Name) + } + if err != nil { + return err + } + pluginWorkspaceContent := pluginDevfileObj.Data.GetDevfileWorkspaceSpecContent() + // add attribute to plugin elements + err = addSourceAttributesForOverrideAndMerge(plugin.ImportReference, pluginWorkspaceContent) + if err != nil { + return err } - pluginWorkspaceContent := pluginDevfileObj.Data.GetDevfileWorkspace() flattenedPlugin := pluginWorkspaceContent if !reflect.DeepEqual(plugin.PluginOverrides, v1.PluginOverrides{}) { + // add attribute to pluginOverrides elements + curNodeImportReference := resolveCtx.importReference + err = addSourceAttributesForOverrideAndMerge(curNodeImportReference, &plugin.PluginOverrides) + if err != nil { + return err + } flattenedPlugin, err = apiOverride.OverrideDevWorkspaceTemplateSpec(pluginWorkspaceContent, plugin.PluginOverrides) if err != nil { return err @@ -168,44 +280,259 @@ func parseParentAndPlugin(d DevfileObj) (err error) { flattenedPlugins = append(flattenedPlugins, flattenedPlugin) } } - mergedContent, err := apiOverride.MergeDevWorkspaceTemplateSpec(d.Data.GetDevfileWorkspace(), flattenedParent, flattenedPlugins...) + + mergedContent, err := apiOverride.MergeDevWorkspaceTemplateSpec(d.Data.GetDevfileWorkspaceSpecContent(), flattenedParent, flattenedPlugins...) 
if err != nil { return err } - d.Data.SetDevfileWorkspace(*mergedContent) + d.Data.SetDevfileWorkspaceSpecContent(*mergedContent) // remove parent from flatterned devfile d.Data.SetParent(nil) return nil } -func parseFromURI(uri string, curDevfileCtx devfileCtx.DevfileCtx) (DevfileObj, error) { +func parseFromURI(importReference v1.ImportReference, curDevfileCtx devfileCtx.DevfileCtx, resolveCtx *resolutionContextTree, tool resolverTools) (DevfileObj, error) { + uri := importReference.Uri // validate URI err := validation.ValidateURI(uri) if err != nil { return DevfileObj{}, err } - - // absolute URL address - if strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "https://") { - return ParseFromURL(uri) - } + // NewDevfileCtx + var d DevfileObj + absoluteURL := strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "https://") + var newUri string // relative path on disk - if curDevfileCtx.GetAbsPath() != "" { - return Parse(path.Join(path.Dir(curDevfileCtx.GetAbsPath()), uri)) + if !absoluteURL && curDevfileCtx.GetAbsPath() != "" { + newUri = path.Join(path.Dir(curDevfileCtx.GetAbsPath()), uri) + d.Ctx = devfileCtx.NewDevfileCtx(newUri) + } else if absoluteURL { + // absolute URL address + newUri = uri + d.Ctx = devfileCtx.NewURLDevfileCtx(newUri) + } else if curDevfileCtx.GetURL() != "" { + // relative path to a URL + u, err := url.Parse(curDevfileCtx.GetURL()) + if err != nil { + return DevfileObj{}, err + } + u.Path = path.Join(path.Dir(u.Path), uri) + newUri = u.String() + d.Ctx = devfileCtx.NewURLDevfileCtx(newUri) } + importReference.Uri = newUri + newResolveCtx := resolveCtx.appendNode(importReference) - if curDevfileCtx.GetURL() != "" { - u, err := url.Parse(curDevfileCtx.GetURL()) + return populateAndParseDevfile(d, newResolveCtx, tool, true) +} + +func parseFromRegistry(importReference v1.ImportReference, resolveCtx *resolutionContextTree, tool resolverTools) (d DevfileObj, err error) { + id := importReference.Id + registryURL := 
importReference.RegistryUrl + if registryURL != "" { + devfileContent, err := getDevfileFromRegistry(id, registryURL) if err != nil { return DevfileObj{}, err } + d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(devfileContent) + if err != nil { + return d, errors.Wrap(err, "failed to set devfile content from bytes") + } + newResolveCtx := resolveCtx.appendNode(importReference) - u.Path = path.Join(path.Dir(u.Path), uri) - // u.String() is the joint absolute URL path - return ParseFromURL(u.String()) + return populateAndParseDevfile(d, newResolveCtx, tool, true) + + } else if tool.registryURLs != nil { + for _, registryURL := range tool.registryURLs { + devfileContent, err := getDevfileFromRegistry(id, registryURL) + if devfileContent != nil && err == nil { + d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(devfileContent) + if err != nil { + return d, errors.Wrap(err, "failed to set devfile content from bytes") + } + importReference.RegistryUrl = registryURL + newResolveCtx := resolveCtx.appendNode(importReference) + + return populateAndParseDevfile(d, newResolveCtx, tool, true) + } + } + } else { + return DevfileObj{}, fmt.Errorf("failed to fetch from registry, registry URL is not provided") + } + + return DevfileObj{}, fmt.Errorf("failed to get id: %s from registry URLs provided", id) +} + +func getDevfileFromRegistry(id, registryURL string) ([]byte, error) { + if !strings.HasPrefix(registryURL, "http://") && !strings.HasPrefix(registryURL, "https://") { + return nil, fmt.Errorf("the provided registryURL: %s is not a valid URL", registryURL) + } + param := util.HTTPRequestParams{ + URL: fmt.Sprintf("%s/devfiles/%s", registryURL, id), + } + return util.HTTPGetRequest(param, 0) +} + +func parseFromKubeCRD(importReference v1.ImportReference, resolveCtx *resolutionContextTree, tool resolverTools) (d DevfileObj, err error) { + + if tool.k8sClient == nil || tool.context == nil { + return DevfileObj{}, fmt.Errorf("Kubernetes client and context are required to parse 
from Kubernetes CRD") + } + namespace := importReference.Kubernetes.Namespace + + if namespace == "" { + // if namespace is not set in devfile, use default namespace provided in by consumer + if tool.defaultNamespace != "" { + namespace = tool.defaultNamespace + } else { + // use current namespace if namespace is not set in devfile and not provided by consumer + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + configOverrides := &clientcmd.ConfigOverrides{} + config := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + namespace, _, err = config.Namespace() + if err != nil { + return DevfileObj{}, fmt.Errorf("kubernetes namespace is not provided, and cannot get current running cluster's namespace: %v", err) + } + } + } + + var dwTemplate v1.DevWorkspaceTemplate + namespacedName := types.NamespacedName{ + Name: importReference.Kubernetes.Name, + Namespace: namespace, + } + err = tool.k8sClient.Get(tool.context, namespacedName, &dwTemplate) + if err != nil { + return DevfileObj{}, err + } + + d, err = convertDevWorskapceTemplateToDevObj(dwTemplate) + if err != nil { + return DevfileObj{}, err + } + + importReference.Kubernetes.Namespace = namespace + newResolveCtx := resolveCtx.appendNode(importReference) + + err = parseParentAndPlugin(d, newResolveCtx, tool) + return d, err + +} + +func convertDevWorskapceTemplateToDevObj(dwTemplate v1.DevWorkspaceTemplate) (d DevfileObj, err error) { + // APIVersion: group/version + // for example: APIVersion: "workspace.devfile.io/v1alpha2" uses api version v1alpha2, and match to v2 schemas + tempList := strings.Split(dwTemplate.APIVersion, "/") + apiversion := tempList[len(tempList)-1] + d.Data, err = data.NewDevfileData(apiversion) + if err != nil { + return DevfileObj{}, err + } + d.Data.SetDevfileWorkspaceSpec(dwTemplate.Spec) + + return d, nil + +} + +//setDefaults sets the default values for nil boolean properties after the merging of devWorkspaceTemplateSpec is complete 
+func setDefaults(d DevfileObj) (err error) { + commands, err := d.Data.GetCommands(common.DevfileOptions{}) + + if err != nil { + return err + } + + //set defaults on the commands + var cmdGroup *v1.CommandGroup + for i := range commands { + command := commands[i] + cmdGroup = nil + + if command.Exec != nil { + exec := command.Exec + val := exec.GetHotReloadCapable() + exec.HotReloadCapable = &val + cmdGroup = exec.Group + + } else if command.Composite != nil { + composite := command.Composite + val := composite.GetParallel() + composite.Parallel = &val + cmdGroup = composite.Group + + } else if command.Apply != nil { + cmdGroup = command.Apply.Group + } + + if cmdGroup != nil { + setIsDefault(cmdGroup) + } + + } + + //set defaults on the components + + components, err := d.Data.GetComponents(common.DevfileOptions{}) + + if err != nil { + return err } - return DevfileObj{}, fmt.Errorf("fail to parse from uri: %s", uri) + var endpoints []v1.Endpoint + for i := range components { + component := components[i] + endpoints = nil + + if component.Container != nil { + container := component.Container + val := container.GetDedicatedPod() + container.DedicatedPod = &val + + val = container.GetMountSources() + container.MountSources = &val + + endpoints = container.Endpoints + + } else if component.Kubernetes != nil { + endpoints = component.Kubernetes.Endpoints + + } else if component.Openshift != nil { + + endpoints = component.Openshift.Endpoints + + } else if component.Volume != nil { + volume := component.Volume + val := volume.GetEphemeral() + volume.Ephemeral = &val + + } else if component.Image != nil { + dockerImage := component.Image.Dockerfile + if dockerImage != nil { + val := dockerImage.GetRootRequired() + dockerImage.RootRequired = &val + } + } + + if endpoints != nil { + setEndpoints(endpoints) + } + } + + return nil +} + +///setIsDefault sets the default value of CommandGroup.IsDefault if nil +func setIsDefault(cmdGroup *v1.CommandGroup) { + val := 
cmdGroup.GetIsDefault() + cmdGroup.IsDefault = &val +} + +//setEndpoints sets the default value of Endpoint.Secure if nil +func setEndpoints(endpoints []v1.Endpoint) { + for i := range endpoints { + val := endpoints[i].GetSecure() + endpoints[i].Secure = &val + } } diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/resolutionContext.go b/vendor/github.com/devfile/library/pkg/devfile/parser/resolutionContext.go new file mode 100644 index 00000000000..5bb0d4bf46e --- /dev/null +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/resolutionContext.go @@ -0,0 +1,64 @@ +package parser + +import ( + "fmt" + "reflect" + + v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" +) + +// resolutionContextTree is a recursive structure representing information about the devfile that is +// lost when flattening (e.g. plugins, parents) +type resolutionContextTree struct { + importReference v1.ImportReference + parentNode *resolutionContextTree +} + +// appendNode adds a new node to the resolution context. 
+func (t *resolutionContextTree) appendNode(importReference v1.ImportReference) *resolutionContextTree { + newNode := &resolutionContextTree{ + importReference: importReference, + parentNode: t, + } + return newNode +} + +// hasCycle checks if the current resolutionContextTree has a cycle +func (t *resolutionContextTree) hasCycle() error { + var seenRefs []v1.ImportReference + currNode := t + hasCycle := false + cycle := resolveImportReference(t.importReference) + + for currNode.parentNode != nil { + for _, seenRef := range seenRefs { + if reflect.DeepEqual(seenRef, currNode.importReference) { + hasCycle = true + } + } + seenRefs = append(seenRefs, currNode.importReference) + currNode = currNode.parentNode + cycle = fmt.Sprintf("%s -> %s", resolveImportReference(currNode.importReference), cycle) + } + + if hasCycle { + return fmt.Errorf("devfile has an cycle in references: %v", cycle) + } + return nil +} + +func resolveImportReference(importReference v1.ImportReference) string { + if !reflect.DeepEqual(importReference, v1.ImportReference{}) { + switch { + case importReference.Uri != "": + return fmt.Sprintf("uri: %s", importReference.Uri) + case importReference.Id != "": + return fmt.Sprintf("id: %s, registryURL: %s", importReference.Id, importReference.RegistryUrl) + case importReference.Kubernetes != nil: + return fmt.Sprintf("name: %s, namespace: %s", importReference.Kubernetes.Name, importReference.Kubernetes.Namespace) + } + + } + // the first node + return "main devfile" +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/sourceAttribute.go b/vendor/github.com/devfile/library/pkg/devfile/parser/sourceAttribute.go new file mode 100644 index 00000000000..dd7608fc0ba --- /dev/null +++ b/vendor/github.com/devfile/library/pkg/devfile/parser/sourceAttribute.go @@ -0,0 +1,115 @@ +package parser + +import ( + "fmt" + v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/api/v2/pkg/attributes" + 
"github.com/devfile/api/v2/pkg/validation" +) + +const ( + importSourceAttribute = validation.ImportSourceAttribute + parentOverrideAttribute = validation.ParentOverrideAttribute + pluginOverrideAttribute = validation.PluginOverrideAttribute +) + +// addSourceAttributesForParentOverride adds an attribute 'api.devfile.io/imported-from=' +// to all elements of template spec content that support attributes. +func addSourceAttributesForTemplateSpecContent(sourceImportReference v1.ImportReference, template *v1.DevWorkspaceTemplateSpecContent) { + for idx, component := range template.Components { + if component.Attributes == nil { + template.Components[idx].Attributes = attributes.Attributes{} + } + template.Components[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) + } + for idx, command := range template.Commands { + if command.Attributes == nil { + template.Commands[idx].Attributes = attributes.Attributes{} + } + template.Commands[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) + } + for idx, project := range template.Projects { + if project.Attributes == nil { + template.Projects[idx].Attributes = attributes.Attributes{} + } + template.Projects[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) + } + for idx, project := range template.StarterProjects { + if project.Attributes == nil { + template.StarterProjects[idx].Attributes = attributes.Attributes{} + } + template.StarterProjects[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) + } +} + +// addSourceAttributesForParentOverride adds an attribute 'api.devfile.io/parent-override-from=' +// to all elements of parent override that support attributes. 
+func addSourceAttributesForParentOverride(sourceImportReference v1.ImportReference, parentOverrides *v1.ParentOverrides) { + for idx, component := range parentOverrides.Components { + if component.Attributes == nil { + parentOverrides.Components[idx].Attributes = attributes.Attributes{} + } + parentOverrides.Components[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) + } + for idx, command := range parentOverrides.Commands { + if command.Attributes == nil { + parentOverrides.Commands[idx].Attributes = attributes.Attributes{} + } + parentOverrides.Commands[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) + } + for idx, project := range parentOverrides.Projects { + if project.Attributes == nil { + parentOverrides.Projects[idx].Attributes = attributes.Attributes{} + } + parentOverrides.Projects[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) + } + for idx, project := range parentOverrides.StarterProjects { + if project.Attributes == nil { + parentOverrides.StarterProjects[idx].Attributes = attributes.Attributes{} + } + parentOverrides.StarterProjects[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) + } + +} + +// addSourceAttributesForPluginOverride adds an attribute 'api.devfile.io/plugin-override-from=' +// to all elements of plugin override that support attributes. 
+func addSourceAttributesForPluginOverride(sourceImportReference v1.ImportReference, pluginOverrides *v1.PluginOverrides) { + for idx, component := range pluginOverrides.Components { + if component.Attributes == nil { + pluginOverrides.Components[idx].Attributes = attributes.Attributes{} + } + pluginOverrides.Components[idx].Attributes.PutString(pluginOverrideAttribute, resolveImportReference(sourceImportReference)) + } + for idx, command := range pluginOverrides.Commands { + if command.Attributes == nil { + pluginOverrides.Commands[idx].Attributes = attributes.Attributes{} + } + pluginOverrides.Commands[idx].Attributes.PutString(pluginOverrideAttribute, resolveImportReference(sourceImportReference)) + } + +} + +// addSourceAttributesForOverrideAndMerge adds an attribute record the import reference to all elements of template that support attributes. +func addSourceAttributesForOverrideAndMerge(sourceImportReference v1.ImportReference, template interface{}) error { + if template == nil { + return fmt.Errorf("cannot add source attributes to nil") + } + + mainContent, isMainContent := template.(*v1.DevWorkspaceTemplateSpecContent) + parentOverride, isParentOverride := template.(*v1.ParentOverrides) + pluginOverride, isPluginOverride := template.(*v1.PluginOverrides) + + switch { + case isMainContent: + addSourceAttributesForTemplateSpecContent(sourceImportReference, mainContent) + case isParentOverride: + addSourceAttributesForParentOverride(sourceImportReference, parentOverride) + case isPluginOverride: + addSourceAttributesForPluginOverride(sourceImportReference, pluginOverride) + default: + return fmt.Errorf("unknown template type") + } + + return nil +} diff --git a/vendor/github.com/devfile/library/pkg/devfile/parser/writer.go b/vendor/github.com/devfile/library/pkg/devfile/parser/writer.go index a98ceca7307..78abd8f954e 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/parser/writer.go +++ 
b/vendor/github.com/devfile/library/pkg/devfile/parser/writer.go @@ -1,8 +1,6 @@ package parser import ( - "encoding/json" - "sigs.k8s.io/yaml" "github.com/devfile/library/pkg/testingutil/filesystem" @@ -10,27 +8,6 @@ import ( "k8s.io/klog" ) -// WriteJsonDevfile creates a devfile.json file -func (d *DevfileObj) WriteJsonDevfile() error { - - // Encode data into JSON format - jsonData, err := json.MarshalIndent(d.Data, "", " ") - if err != nil { - return errors.Wrapf(err, "failed to marshal devfile object into json") - } - - // Write to devfile.json - fs := d.Ctx.GetFs() - err = fs.WriteFile(d.Ctx.GetAbsPath(), jsonData, 0644) - if err != nil { - return errors.Wrapf(err, "failed to create devfile json file") - } - - // Successful - klog.V(2).Infof("devfile json created at: '%s'", OutputDevfileJsonPath) - return nil -} - // WriteYamlDevfile creates a devfile.yaml file func (d *DevfileObj) WriteYamlDevfile() error { diff --git a/vendor/github.com/devfile/library/pkg/devfile/validate/validate.go b/vendor/github.com/devfile/library/pkg/devfile/validate/validate.go index c4286607543..a1fa0026552 100644 --- a/vendor/github.com/devfile/library/pkg/devfile/validate/validate.go +++ b/vendor/github.com/devfile/library/pkg/devfile/validate/validate.go @@ -6,7 +6,7 @@ import ( devfileData "github.com/devfile/library/pkg/devfile/parser/data" v2 "github.com/devfile/library/pkg/devfile/parser/data/v2" "github.com/devfile/library/pkg/devfile/parser/data/v2/common" - "strings" + "github.com/hashicorp/go-multierror" ) // ValidateDevfileData validates whether sections of devfile are compatible @@ -29,48 +29,39 @@ func ValidateDevfileData(data devfileData.DevfileData) error { return err } - var errstrings []string + var returnedErr error switch d := data.(type) { case *v2.DevfileV2: // validate components err = v2Validation.ValidateComponents(components) if err != nil { - errstrings = append(errstrings, err.Error()) + returnedErr = multierror.Append(returnedErr, err) } // validate 
commands err = v2Validation.ValidateCommands(commands, components) if err != nil { - errstrings = append(errstrings, err.Error()) + returnedErr = multierror.Append(returnedErr, err) } err = v2Validation.ValidateEvents(data.GetEvents(), commands) if err != nil { - errstrings = append(errstrings, err.Error()) + returnedErr = multierror.Append(returnedErr, err) } err = v2Validation.ValidateProjects(projects) if err != nil { - errstrings = append(errstrings, err.Error()) + returnedErr = multierror.Append(returnedErr, err) } err = v2Validation.ValidateStarterProjects(starterProjects) if err != nil { - errstrings = append(errstrings, err.Error()) + returnedErr = multierror.Append(returnedErr, err) } - if len(errstrings) > 0 { - return fmt.Errorf(strings.Join(errstrings, "\n")) - } else { - return nil - } + return returnedErr + default: return fmt.Errorf("unknown devfile type %T", d) } - - if len(errstrings) > 0 { - return fmt.Errorf(strings.Join(errstrings, "\n")) - } - - return nil } diff --git a/vendor/github.com/devfile/library/pkg/util/util.go b/vendor/github.com/devfile/library/pkg/util/util.go index e8f4b2cdda1..cb97c23fc04 100644 --- a/vendor/github.com/devfile/library/pkg/util/util.go +++ b/vendor/github.com/devfile/library/pkg/util/util.go @@ -35,6 +35,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/client-go/util/homedir" "k8s.io/klog" ) @@ -768,7 +769,7 @@ func HTTPGetRequest(request HTTPRequestParams, cacheFor int) ([]byte, error) { // We have a non 1xx / 2xx status, return an error if (resp.StatusCode - 300) > 0 { - return nil, errors.Errorf("fail to retrive %s: %s", request.URL, http.StatusText(resp.StatusCode)) + return nil, errors.Errorf("failed to retrieve %s, %v: %s", request.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) } // Process http response @@ -1030,6 +1031,10 @@ func DownloadFileInMemory(url string) ([]byte, error) { if err != nil { return 
nil, err } + // We have a non 1xx / 2xx status, return an error + if (resp.StatusCode - 300) > 0 { + return nil, errors.Errorf("failed to retrieve %s, %v: %s", url, resp.StatusCode, http.StatusText(resp.StatusCode)) + } defer resp.Body.Close() return ioutil.ReadAll(resp.Body) @@ -1067,8 +1072,13 @@ func CheckKubeConfigExist() bool { if os.Getenv("KUBECONFIG") != "" { kubeconfig = os.Getenv("KUBECONFIG") } else { - home, _ := os.UserHomeDir() - kubeconfig = fmt.Sprintf("%s/.kube/config", home) + if home := homedir.HomeDir(); home != "" { + kubeconfig = filepath.Join(home, ".kube", "config") + klog.V(4).Infof("using default kubeconfig path %s", kubeconfig) + } else { + klog.V(4).Infof("no KUBECONFIG provided and cannot fallback to default") + return false + } } if CheckPathExists(kubeconfig) { diff --git a/vendor/github.com/devfile/registry-support/index/generator/LICENSE b/vendor/github.com/devfile/registry-support/index/generator/LICENSE index e48e0963459..261eeb9e9f8 100644 --- a/vendor/github.com/devfile/registry-support/index/generator/LICENSE +++ b/vendor/github.com/devfile/registry-support/index/generator/LICENSE @@ -1,277 +1,201 @@ -Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. 
- Contributions do not include changes or additions to the Program that - are not Modified Works. - -"Contributor" means any person or entity that Distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which -are necessarily infringed by the use or sale of its Contribution alone -or when combined with the Program. - -"Program" means the Contributions Distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement -or any Secondary License (as applicable), including Contributors. - -"Derivative Works" shall mean any work, whether in Source Code or other -form, that is based on (or derived from) the Program and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. - -"Modified Works" shall mean any work in Source Code or other form that -results from an addition to, deletion from, or modification of the -contents of the Program, including, for purposes of clarity any new file -in Source Code form that contains any contents of the Program. Modified -Works shall not include works that contain only declarations, -interfaces, types, classes, structures, or files of the Program solely -in each case in order to link to, bind by name, or subclass the Program -or Modified Works thereof. - -"Distribute" means the acts of a) distributing or b) making available -in any manner that enables the transfer of a copy. - -"Source Code" means the form of a Program preferred for making -modifications, including but not limited to software source code, -documentation source, and configuration files. - -"Secondary License" means either the GNU General Public License, -Version 2.0, or any later versions of that license, including any -exceptions or additional permissions as identified by the initial -Contributor. - -2. 
GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. - - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. - Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. 
- - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. - - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - -3. REQUIREMENTS - -3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of this section 3. 
- -3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - -3.3 Contributors may not remove or alter any copyright, patent, -trademark, attribution notices, disclaimers of warranty, or limitations -of liability ("notices") contained within the Program from any copy of -the Program which they Distribute, provided that Contributors may add -their own appropriate notices. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities -with respect to end users, business partners and the like. While this -license is intended to facilitate the commercial use of the Program, -the Contributor who includes the Program in a commercial product -offering should do so in a manner which does not create potential -liability for other Contributors. Therefore, if a Contributor includes -the Program in a commercial product offering, such Contributor -("Commercial Contributor") hereby agrees to defend and indemnify every -other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits -and other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such -Commercial Contributor in connection with its distribution of the Program -in a commercial product offering. The obligations in this section do not -apply to any claims or Losses relating to any actual or alleged -intellectual property infringement. 
In order to qualify, an Indemnified -Contributor must: a) promptly notify the Commercial Contributor in -writing of such claim, and b) allow the Commercial Contributor to control, -and cooperate with the Commercial Contributor in, the defense and any -related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial -product offering, Product X. That Contributor is then a Commercial -Contributor. If that Commercial Contributor then makes performance -claims, or offers warranties related to Product X, those performance -claims and warranties are such Commercial Contributor's responsibility -alone. Under this section, the Commercial Contributor would have to -defend claims against the other Contributors related to those performance -claims and warranties, and if a court requires any other Contributor to -pay any damages as a result, the Commercial Contributor must pay -those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR -PURPOSE. Each Recipient is solely responsible for determining the -appropriateness of using and distributing the Program and assumes all -risks associated with its exercise of rights under this Agreement, -including but not limited to the risks and costs of program errors, -compliance with applicable laws, damage to or loss of data, programs -or equipment, and unavailability or interruption of operations. - -6. 
DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS -SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of -the remainder of the terms of this Agreement, and without further -action by the parties hereto, such provision shall be reformed to the -minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity -(including a cross-claim or counterclaim in a lawsuit) alleging that the -Program itself (excluding combinations of the Program with other software -or hardware) infringes such Recipient's patent(s), then such Recipient's -rights granted under Section 2(b) shall terminate as of the date such -litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it -fails to comply with any of the material terms or conditions of this -Agreement and does not cure such failure in a reasonable period of -time after becoming aware of such noncompliance. If all Recipient's -rights under this Agreement terminate, Recipient agrees to cease use -and distribution of the Program as soon as reasonably practicable. -However, Recipient's obligations under this Agreement and any licenses -granted by Recipient relating to the Program shall continue and survive. 
- -Everyone is permitted to copy and distribute copies of this Agreement, -but in order to avoid inconsistency the Agreement is copyrighted and -may only be modified in the following manner. The Agreement Steward -reserves the right to publish new versions (including revisions) of -this Agreement from time to time. No one other than the Agreement -Steward has the right to modify this Agreement. The Eclipse Foundation -is the initial Agreement Steward. The Eclipse Foundation may assign the -responsibility to serve as the Agreement Steward to a suitable separate -entity. Each new version of the Agreement will be given a distinguishing -version number. The Program (including Contributions) may always be -Distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to Distribute the Program (including its -Contributions) under the new version. - -Except as expressly stated in Sections 2(a) and 2(b) above, Recipient -receives no rights or licenses to the intellectual property of any -Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. All rights in the Program not expressly granted -under this Agreement are reserved. Nothing in this Agreement is intended -to be enforceable by any entity that is not a Contributor or Recipient. -No third-party beneficiary rights are created under this Agreement. - -Exhibit A - Form of Secondary Licenses Notice - -"This Source Code may also be made available under the following -Secondary Licenses when the conditions for such availability set forth -in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), -version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. 
- - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go b/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go index 065c3102446..adeb72a7203 100644 --- a/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go +++ b/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go @@ -67,6 +67,7 @@ links: map[string]string - Links related to the devfile resources: []string - The file resources that compose a devfile stack. 
starterProjects: string[] - The project templates that can be used in the devfile git: *git - The information of remote repositories +provider: string - The devfile provider information */ // Schema is the index file schema @@ -78,6 +79,7 @@ type Schema struct { Description string `yaml:"description,omitempty" json:"description,omitempty"` Type DevfileType `yaml:"type,omitempty" json:"type,omitempty"` Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` + Architectures []string `yaml:"architectures,omitempty" json:"architectures,omitempty"` Icon string `yaml:"icon,omitempty" json:"icon,omitempty"` GlobalMemoryLimit string `yaml:"globalMemoryLimit,omitempty" json:"globalMemoryLimit,omitempty"` ProjectType string `yaml:"projectType,omitempty" json:"projectType,omitempty"` @@ -86,6 +88,8 @@ type Schema struct { Resources []string `yaml:"resources,omitempty" json:"resources,omitempty"` StarterProjects []string `yaml:"starterProjects,omitempty" json:"starterProjects,omitempty"` Git *Git `yaml:"git,omitempty" json:"git,omitempty"` + Provider string `yaml:"provider,omitempty" json:"provider,omitempty"` + SupportUrl string `yaml:"supportUrl,omitempty" json:"supportUrl,omitempty"` } // DevfileType describes the type of devfile diff --git a/vendor/github.com/devfile/registry-support/registry-library/LICENSE b/vendor/github.com/devfile/registry-support/registry-library/LICENSE new file mode 100644 index 00000000000..e55f34467e2 --- /dev/null +++ b/vendor/github.com/devfile/registry-support/registry-library/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. 
DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. 
+ +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. 
+ Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. 
REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. 
+ +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. 
If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. 
The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. \ No newline at end of file diff --git a/vendor/github.com/devfile/registry-support/registry-library/library/library.go b/vendor/github.com/devfile/registry-support/registry-library/library/library.go new file mode 100644 index 00000000000..fde9800bbb9 --- /dev/null +++ b/vendor/github.com/devfile/registry-support/registry-library/library/library.go @@ -0,0 +1,323 @@ +// +// Copyright (c) 2020 Red Hat, Inc. 
+// This program and the accompanying materials are made +// available under the terms of the Eclipse Public License 2.0 +// which is available at https://www.eclipse.org/legal/epl-2.0/ +// +// SPDX-License-Identifier: EPL-2.0 +// +// Contributors: +// Red Hat, Inc. - initial API and implementation + +package library + +import ( + "archive/tar" + "compress/gzip" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "text/tabwriter" + "time" + + orasctx "github.com/deislabs/oras/pkg/context" + + "github.com/containerd/containerd/remotes/docker" + "github.com/deislabs/oras/pkg/content" + "github.com/deislabs/oras/pkg/oras" + indexSchema "github.com/devfile/registry-support/index/generator/schema" +) + +const ( + // Devfile media types + DevfileConfigMediaType = "application/vnd.devfileio.devfile.config.v2+json" + DevfileMediaType = "application/vnd.devfileio.devfile.layer.v1" + DevfileVSXMediaType = "application/vnd.devfileio.vsx.layer.v1.tar" + DevfileSVGLogoMediaType = "image/svg+xml" + DevfilePNGLogoMediaType = "image/png" + DevfileArchiveMediaType = "application/x-tar" + + httpRequestTimeout = 30 * time.Second // httpRequestTimeout configures timeout of all HTTP requests + responseHeaderTimeout = 30 * time.Second // responseHeaderTimeout is the timeout to retrieve the server's response headers +) + +var ( + DevfileMediaTypeList = []string{DevfileMediaType} + DevfileAllMediaTypesList = []string{DevfileMediaType, DevfilePNGLogoMediaType, DevfileSVGLogoMediaType, DevfileVSXMediaType, DevfileArchiveMediaType} +) + +type Registry struct { + registryURL string + registryContents []indexSchema.Schema + err error +} + +type RegistryOptions struct { + SkipTLSVerify bool + User string + Filter RegistryFilter +} + +type RegistryFilter struct { + Architectures []string +} + +// GetRegistryIndex returns the list of stacks and/or samples, more specifically +// it gets the 
stacks and/or samples content of the index of the specified registry +// for listing the stacks and/or samples +func GetRegistryIndex(registryURL string, options RegistryOptions, devfileTypes ...indexSchema.DevfileType) ([]indexSchema.Schema, error) { + var registryIndex []indexSchema.Schema + + // Call index server REST API to get the index + urlObj, err := url.Parse(registryURL) + if err != nil { + return nil, err + } + getStack := false + getSample := false + for _, devfileType := range devfileTypes { + if devfileType == indexSchema.StackDevfileType { + getStack = true + } else if devfileType == indexSchema.SampleDevfileType { + getSample = true + } + } + + var endpoint string + if getStack && getSample { + endpoint = path.Join("index", "all") + } else if getStack && !getSample { + endpoint = "index" + } else if getSample && !getStack { + endpoint = path.Join("index", "sample") + } else { + return registryIndex, nil + } + + if !reflect.DeepEqual(options.Filter, RegistryFilter{}) { + endpoint = endpoint + "?" 
+ } + + if len(options.Filter.Architectures) > 0 { + for _, arch := range options.Filter.Architectures { + endpoint = endpoint + "arch=" + arch + "&" + } + endpoint = strings.TrimSuffix(endpoint, "&") + } + + endpointURL, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + + urlObj = urlObj.ResolveReference(endpointURL) + + url := urlObj.String() + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if options.User != "" { + req.Header.Add("User", options.User) + } + httpClient := &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: responseHeaderTimeout, + TLSClientConfig: &tls.Config{InsecureSkipVerify: options.SkipTLSVerify}, + }, + Timeout: httpRequestTimeout, + } + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + bytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + err = json.Unmarshal(bytes, &registryIndex) + if err != nil { + return nil, err + } + return registryIndex, nil +} + +// GetMultipleRegistryIndices returns the list of stacks and/or samples of multiple registries +func GetMultipleRegistryIndices(registryURLs []string, options RegistryOptions, devfileTypes ...indexSchema.DevfileType) []Registry { + registryList := make([]Registry, len(registryURLs)) + registryContentsChannel := make(chan []indexSchema.Schema) + errChannel := make(chan error) + + for index, registryURL := range registryURLs { + go func(chan []indexSchema.Schema, chan error) { + registryContents, err := GetRegistryIndex(registryURL, options, devfileTypes...)
+ registryContentsChannel <- registryContents + errChannel <- err + }(registryContentsChannel, errChannel) + registryList[index].registryURL = registryURL + registryList[index].registryContents = <-registryContentsChannel + registryList[index].err = <-errChannel + } + return registryList +} + +// PrintRegistry prints the registry with devfile type +func PrintRegistry(registryURLs string, devfileType string, options RegistryOptions) error { + // Get the registry index + registryURLArray := strings.Split(registryURLs, ",") + var registryList []Registry + + if devfileType == string(indexSchema.StackDevfileType) { + registryList = GetMultipleRegistryIndices(registryURLArray, options, indexSchema.StackDevfileType) + } else if devfileType == string(indexSchema.SampleDevfileType) { + registryList = GetMultipleRegistryIndices(registryURLArray, options, indexSchema.SampleDevfileType) + } else if devfileType == "all" { + registryList = GetMultipleRegistryIndices(registryURLArray, options, indexSchema.StackDevfileType, indexSchema.SampleDevfileType) + } + + w := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) + fmt.Fprintln(w, "Name", "\t", "Description", "\t", "Registry", "\t", "Error", "\t") + for _, devfileRegistry := range registryList { + if devfileRegistry.err != nil { + fmt.Fprintln(w, "NONE", "\t", "NONE", "\t", devfileRegistry.registryURL, devfileRegistry.err.Error(), "\t") + } else { + for _, devfileEntry := range devfileRegistry.registryContents { + fmt.Fprintln(w, devfileEntry.Name, "\t", devfileEntry.Description, "\t", devfileRegistry.registryURL, "\t", "NONE", "\t") + } + } + } + w.Flush() + return nil +} + +// PullStackByMediaTypesFromRegistry pulls stack from registry with allowed media types to the destination directory +func PullStackByMediaTypesFromRegistry(registry string, stack string, allowedMediaTypes []string, destDir string, options RegistryOptions) error { + // Get the registry index + registryIndex, err := 
GetRegistryIndex(registry, options, indexSchema.StackDevfileType) + if err != nil { + return err + } + + // Parse the index to get the specified stack's metadata in the index + var stackIndex indexSchema.Schema + exist := false + for _, item := range registryIndex { + if item.Name == stack { + stackIndex = item + exist = true + break + } + } + if !exist { + return fmt.Errorf("stack %s does not exist in the registry %s", stack, registry) + } + + // Pull stack initialization + ctx := orasctx.Background() + urlObj, err := url.Parse(registry) + if err != nil { + return err + } + plainHTTP := true + if urlObj.Scheme == "https" { + plainHTTP = false + } + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: options.SkipTLSVerify}, + }, + } + headers := make(http.Header) + if options.User != "" { + headers.Add("User", options.User) + } + resolver := docker.NewResolver(docker.ResolverOptions{Headers: headers, PlainHTTP: plainHTTP, Client: httpClient}) + ref := path.Join(urlObj.Host, stackIndex.Links["self"]) + fileStore := content.NewFileStore(destDir) + defer fileStore.Close() + + // Pull stack from registry and save it to disk + _, _, err = oras.Pull(ctx, resolver, ref, fileStore, oras.WithAllowedMediaTypes(allowedMediaTypes)) + if err != nil { + return fmt.Errorf("failed to pull stack %s from %s with allowed media types %v: %v", stack, ref, allowedMediaTypes, err) + } + + // Decompress archive.tar + archivePath := filepath.Join(destDir, "archive.tar") + if _, err := os.Stat(archivePath); err == nil { + err := decompress(destDir, archivePath) + if err != nil { + return err + } + + err = os.RemoveAll(archivePath) + if err != nil { + return err + } + } + + return nil +} + +// PullStackFromRegistry pulls stack from registry with all stack resources (all media types) to the destination directory +func PullStackFromRegistry(registry string, stack string, destDir string, options RegistryOptions) error { + return 
PullStackByMediaTypesFromRegistry(registry, stack, DevfileAllMediaTypesList, destDir, options) +} + +// decompress extracts the archive file +func decompress(targetDir string, tarFile string) error { + reader, err := os.Open(tarFile) + if err != nil { + return err + } + defer reader.Close() + + gzReader, err := gzip.NewReader(reader) + if err != nil { + return err + } + defer gzReader.Close() + + tarReader := tar.NewReader(gzReader) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + target := path.Join(targetDir, header.Name) + switch header.Typeflag { + case tar.TypeDir: + err = os.MkdirAll(target, os.FileMode(header.Mode)) + if err != nil { + return err + } + case tar.TypeReg: + w, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + _, err = io.Copy(w, tarReader) + if err != nil { + return err + } + w.Close() + default: + log.Printf("Unsupported type: %v", header.Typeflag) + } + } + + return nil +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e00a..00000000000 --- a/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 98ad417e22e..00000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.9" - - "1.10" - - "1.11" -script: - - go test - - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de73..00000000000 --- 
a/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4d1..00000000000 --- a/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). 
- -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. 
- var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 58600740266..00000000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. 
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. 
-type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. 
- fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
-func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'Å¿' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. 
-func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. 
-func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/go.mod b/vendor/github.com/ghodss/yaml/go.mod deleted file mode 100644 index 8d9ad7b6406..00000000000 --- a/vendor/github.com/ghodss/yaml/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/ghodss/yaml - -require gopkg.in/yaml.v2 v2.2.2 diff --git a/vendor/github.com/ghodss/yaml/go.sum b/vendor/github.com/ghodss/yaml/go.sum deleted file mode 100644 index bd555a333b2..00000000000 --- a/vendor/github.com/ghodss/yaml/go.sum +++ /dev/null @@ -1,3 +0,0 @@ -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index dfd264d6c5e..00000000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package yaml provides a wrapper around go-yaml designed to enable a better -// way of handling YAML when marshaling to and from structs. -// -// In short, this package first converts YAML to JSON using go-yaml and then -// uses json.Marshal and json.Unmarshal to convert to or from the struct. This -// means that it effectively reuses the JSON struct tags as well as the custom -// JSON methods MarshalJSON and UnmarshalJSON unlike go-yaml. 
-// -// See also http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang -// -package yaml // import "github.com/ghodss/yaml" - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return unmarshal(yaml.Unmarshal, y, o, opts) -} - -// UnmarshalStrict is like Unmarshal except that any mapping keys that are -// duplicates will result in an error. -// To also be strict about unknown fields, add the DisallowUnknownFields option. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return unmarshal(yaml.UnmarshalStrict, y, o, opts) -} - -func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o interface{}, opts []JSONOpt) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo, f) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. 
-func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&o); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -// -// For strict decoding of YAML, use YAMLToJSONStrict. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) -} - -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. 
-func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilities happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. 
- // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. 
- if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. 
- var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go deleted file mode 100644 index ab3e06a222a..00000000000 --- a/vendor/github.com/ghodss/yaml/yaml_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -// +build go1.10 - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -} diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/github.com/golang/mock/AUTHORS new file mode 100644 index 00000000000..660b8ccc8ae --- /dev/null +++ b/vendor/github.com/golang/mock/AUTHORS @@ -0,0 +1,12 @@ +# This is the official list of GoMock authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Alex Reece +Google Inc. 
diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS new file mode 100644 index 00000000000..def849cab1b --- /dev/null +++ b/vendor/github.com/golang/mock/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute (and typically +# have contributed) code to the gomock repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. 
+ +Aaron Jacobs +Alex Reece +David Symonds +Ryan Barrett diff --git a/vendor/github.com/golang/mock/LICENSE b/vendor/github.com/golang/mock/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/golang/mock/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/mock/gomock/call.go b/vendor/github.com/golang/mock/gomock/call.go new file mode 100644 index 00000000000..b18cc2d6147 --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/call.go @@ -0,0 +1,433 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +// Call represents an expected call to a mock. +type Call struct { + t TestHelper // for triggering test failures on invalid call setup + + receiver interface{} // the receiver of the method call + method string // the name of the method + methodType reflect.Type // the type of the method + args []Matcher // the args + origin string // file and line number of call setup + + preReqs []*Call // prerequisite calls + + // Expectations + minCalls, maxCalls int + + numCalls int // actual number made + + // actions are called when this Call is called. Each action gets the args and + // can set the return values by returning a non-nil slice. Actions run in the + // order they are created. + actions []func([]interface{}) []interface{} +} + +// newCall creates a *Call. It requires the method type in order to support +// unexported methods. +func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { + t.Helper() + + // TODO: check arity, types. + margs := make([]Matcher, len(args)) + for i, arg := range args { + if m, ok := arg.(Matcher); ok { + margs[i] = m + } else if arg == nil { + // Handle nil specially so that passing a nil interface value + // will match the typed nils of concrete args. + margs[i] = Nil() + } else { + margs[i] = Eq(arg) + } + } + + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. + origin := callerInfo(3) + actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { + // Synthesize the zero value for each of the return args' types. 
+ rets := make([]interface{}, methodType.NumOut()) + for i := 0; i < methodType.NumOut(); i++ { + rets[i] = reflect.Zero(methodType.Out(i)).Interface() + } + return rets + }} + return &Call{t: t, receiver: receiver, method: method, methodType: methodType, + args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} +} + +// AnyTimes allows the expectation to be called 0 or more times +func (c *Call) AnyTimes() *Call { + c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity + return c +} + +// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes +// was previously called with 1, MinTimes also sets the maximum number of calls to infinity. +func (c *Call) MinTimes(n int) *Call { + c.minCalls = n + if c.maxCalls == 1 { + c.maxCalls = 1e8 + } + return c +} + +// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was +// previously called with 1, MaxTimes also sets the minimum number of calls to 0. +func (c *Call) MaxTimes(n int) *Call { + c.maxCalls = n + if c.minCalls == 1 { + c.minCalls = 0 + } + return c +} + +// DoAndReturn declares the action to run when the call is matched. +// The return values from this function are returned by the mocked function. +// It takes an interface{} argument to support n-arity functions. +func (c *Call) DoAndReturn(f interface{}) *Call { + // TODO: Check arity and types here, rather than dying badly elsewhere. + v := reflect.ValueOf(f) + + c.addAction(func(args []interface{}) []interface{} { + vargs := make([]reflect.Value, len(args)) + ft := v.Type() + for i := 0; i < len(args); i++ { + if args[i] != nil { + vargs[i] = reflect.ValueOf(args[i]) + } else { + // Use the zero value for the arg. 
+ vargs[i] = reflect.Zero(ft.In(i)) + } + } + vrets := v.Call(vargs) + rets := make([]interface{}, len(vrets)) + for i, ret := range vrets { + rets[i] = ret.Interface() + } + return rets + }) + return c +} + +// Do declares the action to run when the call is matched. The function's +// return values are ignored to retain backward compatibility. To use the +// return values call DoAndReturn. +// It takes an interface{} argument to support n-arity functions. +func (c *Call) Do(f interface{}) *Call { + // TODO: Check arity and types here, rather than dying badly elsewhere. + v := reflect.ValueOf(f) + + c.addAction(func(args []interface{}) []interface{} { + vargs := make([]reflect.Value, len(args)) + ft := v.Type() + for i := 0; i < len(args); i++ { + if args[i] != nil { + vargs[i] = reflect.ValueOf(args[i]) + } else { + // Use the zero value for the arg. + vargs[i] = reflect.Zero(ft.In(i)) + } + } + v.Call(vargs) + return nil + }) + return c +} + +// Return declares the values to be returned by the mocked function call. +func (c *Call) Return(rets ...interface{}) *Call { + c.t.Helper() + + mt := c.methodType + if len(rets) != mt.NumOut() { + c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, len(rets), mt.NumOut(), c.origin) + } + for i, ret := range rets { + if got, want := reflect.TypeOf(ret), mt.Out(i); got == want { + // Identical types; nothing to do. + } else if got == nil { + // Nil needs special handling. + switch want.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + // ok + default: + c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]", + i, c.receiver, c.method, want, c.origin) + } + } else if got.AssignableTo(want) { + // Assignable type relation. Make the assignment now so that the generated code + // can return the values with a type assertion. 
+ v := reflect.New(want).Elem() + v.Set(reflect.ValueOf(ret)) + rets[i] = v.Interface() + } else { + c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]", + i, c.receiver, c.method, got, want, c.origin) + } + } + + c.addAction(func([]interface{}) []interface{} { + return rets + }) + + return c +} + +// Times declares the exact number of times a function call is expected to be executed. +func (c *Call) Times(n int) *Call { + c.minCalls, c.maxCalls = n, n + return c +} + +// SetArg declares an action that will set the nth argument's value, +// indirected through a pointer. Or, in the case of a slice, SetArg +// will copy value's elements into the nth argument. +func (c *Call) SetArg(n int, value interface{}) *Call { + c.t.Helper() + + mt := c.methodType + // TODO: This will break on variadic methods. + // We will need to check those at invocation time. + if n < 0 || n >= mt.NumIn() { + c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]", + n, mt.NumIn(), c.origin) + } + // Permit setting argument through an interface. + // In the interface case, we don't (nay, can't) check the type here. + at := mt.In(n) + switch at.Kind() { + case reflect.Ptr: + dt := at.Elem() + if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) { + c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]", + n, vt, dt, c.origin) + } + case reflect.Interface: + // nothing to do + case reflect.Slice: + // nothing to do + default: + c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]", + n, at, c.origin) + } + + c.addAction(func(args []interface{}) []interface{} { + v := reflect.ValueOf(value) + switch reflect.TypeOf(args[n]).Kind() { + case reflect.Slice: + setSlice(args[n], v) + default: + reflect.ValueOf(args[n]).Elem().Set(v) + } + return nil + }) + return c +} + +// isPreReq returns true if other is a direct or indirect prerequisite to c. 
+func (c *Call) isPreReq(other *Call) bool { + for _, preReq := range c.preReqs { + if other == preReq || preReq.isPreReq(other) { + return true + } + } + return false +} + +// After declares that the call may only match after preReq has been exhausted. +func (c *Call) After(preReq *Call) *Call { + c.t.Helper() + + if c == preReq { + c.t.Fatalf("A call isn't allowed to be its own prerequisite") + } + if preReq.isPreReq(c) { + c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq) + } + + c.preReqs = append(c.preReqs, preReq) + return c +} + +// Returns true if the minimum number of calls have been made. +func (c *Call) satisfied() bool { + return c.numCalls >= c.minCalls +} + +// Returns true if the maximum number of calls have been made. +func (c *Call) exhausted() bool { + return c.numCalls >= c.maxCalls +} + +func (c *Call) String() string { + args := make([]string, len(c.args)) + for i, arg := range c.args { + args[i] = arg.String() + } + arguments := strings.Join(args, ", ") + return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin) +} + +// Tests if the given call matches the expected call. +// If yes, returns nil. If no, returns error with message explaining why it does not match. +func (c *Call) matches(args []interface{}) error { + if !c.methodType.IsVariadic() { + if len(args) != len(c.args) { + return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", + c.origin, len(args), len(c.args)) + } + + for i, m := range c.args { + if !m.Matches(args[i]) { + return fmt.Errorf( + "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", + c.origin, i, formatGottenArg(m, args[i]), m, + ) + } + } + } else { + if len(c.args) < c.methodType.NumIn()-1 { + return fmt.Errorf("expected call at %s has the wrong number of matchers. 
Got: %d, want: %d", + c.origin, len(c.args), c.methodType.NumIn()-1) + } + if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) { + return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", + c.origin, len(args), len(c.args)) + } + if len(args) < len(c.args)-1 { + return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d", + c.origin, len(args), len(c.args)-1) + } + + for i, m := range c.args { + if i < c.methodType.NumIn()-1 { + // Non-variadic args + if !m.Matches(args[i]) { + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) + } + continue + } + // The last arg has a possibility of a variadic argument, so let it branch + + // sample: Foo(a int, b int, c ...int) + if i < len(c.args) && i < len(args) { + if m.Matches(args[i]) { + // Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any()) + // Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher) + // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC) + // Got Foo(a, b) want Foo(matcherA, matcherB) + // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD) + continue + } + } + + // The number of actual args don't match the number of matchers, + // or the last matcher is a slice and the last arg is not. + // If this function still matches it is because the last matcher + // matches all the remaining arguments or the lack of any. + // Convert the remaining arguments, if any, into a slice of the + // expected type. 
+ vargsType := c.methodType.In(c.methodType.NumIn() - 1) + vargs := reflect.MakeSlice(vargsType, 0, len(args)-i) + for _, arg := range args[i:] { + vargs = reflect.Append(vargs, reflect.ValueOf(arg)) + } + if m.Matches(vargs.Interface()) { + // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any()) + // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher) + // Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any()) + // Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher) + break + } + // Wrong number of matchers or not match. Fail. + // Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD) + // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD) + // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) + // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) + // Got Foo(a, b, c) want Foo(matcherA, matcherB) + + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) + } + } + + // Check that all prerequisite calls have been satisfied. + for _, preReqCall := range c.preReqs { + if !preReqCall.satisfied() { + return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", + c.origin, preReqCall, c) + } + } + + // Check that the call is not exhausted. + if c.exhausted() { + return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin) + } + + return nil +} + +// dropPrereqs tells the expected Call to not re-check prerequisite calls any +// longer, and to return its current set. +func (c *Call) dropPrereqs() (preReqs []*Call) { + preReqs = c.preReqs + c.preReqs = nil + return +} + +func (c *Call) call() []func([]interface{}) []interface{} { + c.numCalls++ + return c.actions +} + +// InOrder declares that the given calls should occur in order. 
+func InOrder(calls ...*Call) { + for i := 1; i < len(calls); i++ { + calls[i].After(calls[i-1]) + } +} + +func setSlice(arg interface{}, v reflect.Value) { + va := reflect.ValueOf(arg) + for i := 0; i < v.Len(); i++ { + va.Index(i).Set(v.Index(i)) + } +} + +func (c *Call) addAction(action func([]interface{}) []interface{}) { + c.actions = append(c.actions, action) +} + +func formatGottenArg(m Matcher, arg interface{}) string { + got := fmt.Sprintf("%v", arg) + if gs, ok := m.(GotFormatter); ok { + got = gs.Got(arg) + } + return got +} diff --git a/vendor/github.com/golang/mock/gomock/callset.go b/vendor/github.com/golang/mock/gomock/callset.go new file mode 100644 index 00000000000..e4e85d602f8 --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/callset.go @@ -0,0 +1,112 @@ +// Copyright 2011 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "bytes" + "fmt" +) + +// callSet represents a set of expected calls, indexed by receiver and method +// name. +type callSet struct { + // Calls that are still expected. + expected map[callSetKey][]*Call + // Calls that have been exhausted. + exhausted map[callSetKey][]*Call +} + +// callSetKey is the key in the maps in callSet +type callSetKey struct { + receiver interface{} + fname string +} + +func newCallSet() *callSet { + return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)} +} + +// Add adds a new expected call. 
+func (cs callSet) Add(call *Call) { + key := callSetKey{call.receiver, call.method} + m := cs.expected + if call.exhausted() { + m = cs.exhausted + } + m[key] = append(m[key], call) +} + +// Remove removes an expected call. +func (cs callSet) Remove(call *Call) { + key := callSetKey{call.receiver, call.method} + calls := cs.expected[key] + for i, c := range calls { + if c == call { + // maintain order for remaining calls + cs.expected[key] = append(calls[:i], calls[i+1:]...) + cs.exhausted[key] = append(cs.exhausted[key], call) + break + } + } +} + +// FindMatch searches for a matching call. Returns error with explanation message if no call matched. +func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) { + key := callSetKey{receiver, method} + + // Search through the expected calls. + expected := cs.expected[key] + var callsErrors bytes.Buffer + for _, call := range expected { + err := call.matches(args) + if err != nil { + _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + } else { + return call, nil + } + } + + // If we haven't found a match then search through the exhausted calls so we + // get useful error messages. + exhausted := cs.exhausted[key] + for _, call := range exhausted { + if err := call.matches(args); err != nil { + _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + continue + } + _, _ = fmt.Fprintf( + &callsErrors, "all expected calls for method %q have been exhausted", method, + ) + } + + if len(expected)+len(exhausted) == 0 { + _, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method) + } + + return nil, fmt.Errorf(callsErrors.String()) +} + +// Failures returns the calls that are not satisfied. 
+func (cs callSet) Failures() []*Call { + failures := make([]*Call, 0, len(cs.expected)) + for _, calls := range cs.expected { + for _, call := range calls { + if !call.satisfied() { + failures = append(failures, call) + } + } + } + return failures +} diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/github.com/golang/mock/gomock/controller.go new file mode 100644 index 00000000000..3b6569091b2 --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/controller.go @@ -0,0 +1,333 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gomock is a mock framework for Go. +// +// Standard usage: +// (1) Define an interface that you wish to mock. +// type MyInterface interface { +// SomeMethod(x int64, y string) +// } +// (2) Use mockgen to generate a mock from the interface. +// (3) Use the mock in a test: +// func TestMyThing(t *testing.T) { +// mockCtrl := gomock.NewController(t) +// defer mockCtrl.Finish() +// +// mockObj := something.NewMockMyInterface(mockCtrl) +// mockObj.EXPECT().SomeMethod(4, "blah") +// // pass mockObj to a real object and play with it. +// } +// +// By default, expected calls are not enforced to run in any particular order. +// Call order dependency can be enforced by use of InOrder and/or Call.After. +// Call.After can create more varied call order dependencies, but InOrder is +// often more convenient. 
+// +// The following examples create equivalent call order dependencies. +// +// Example of using Call.After to chain expected call order: +// +// firstCall := mockObj.EXPECT().SomeMethod(1, "first") +// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) +// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) +// +// Example of using InOrder to declare expected call order: +// +// gomock.InOrder( +// mockObj.EXPECT().SomeMethod(1, "first"), +// mockObj.EXPECT().SomeMethod(2, "second"), +// mockObj.EXPECT().SomeMethod(3, "third"), +// ) +package gomock + +import ( + "context" + "fmt" + "reflect" + "runtime" + "sync" +) + +// A TestReporter is something that can be used to report test failures. It +// is satisfied by the standard library's *testing.T. +type TestReporter interface { + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) +} + +// TestHelper is a TestReporter that has the Helper method. It is satisfied +// by the standard library's *testing.T. +type TestHelper interface { + TestReporter + Helper() +} + +// cleanuper is used to check if TestHelper also has the `Cleanup` method. A +// common pattern is to pass in a `*testing.T` to +// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup +// method. This can be utilized to call `Finish()` so the caller of this library +// does not have to. +type cleanuper interface { + Cleanup(func()) +} + +// A Controller represents the top-level control of a mock ecosystem. It +// defines the scope and lifetime of mock objects, as well as their +// expectations. It is safe to call Controller's methods from multiple +// goroutines. Each test should create a new Controller and invoke Finish via +// defer. +// +// func TestFoo(t *testing.T) { +// ctrl := gomock.NewController(t) +// defer ctrl.Finish() +// // .. 
+// } +// +// func TestBar(t *testing.T) { +// t.Run("Sub-Test-1", st) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. +// }) +// t.Run("Sub-Test-2", st) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. +// }) +// }) +type Controller struct { + // T should only be called within a generated mock. It is not intended to + // be used in user code and may be changed in future versions. T is the + // TestReporter passed in when creating the Controller via NewController. + // If the TestReporter does not implement a TestHelper it will be wrapped + // with a nopTestHelper. + T TestHelper + mu sync.Mutex + expectedCalls *callSet + finished bool +} + +// NewController returns a new Controller. It is the preferred way to create a +// Controller. +// +// New in go1.14+, if you are passing a *testing.T into this function you no +// longer need to call ctrl.Finish() in your test methods +func NewController(t TestReporter) *Controller { + h, ok := t.(TestHelper) + if !ok { + h = &nopTestHelper{t} + } + ctrl := &Controller{ + T: h, + expectedCalls: newCallSet(), + } + if c, ok := isCleanuper(ctrl.T); ok { + c.Cleanup(func() { + ctrl.T.Helper() + ctrl.finish(true, nil) + }) + } + + return ctrl +} + +type cancelReporter struct { + t TestHelper + cancel func() +} + +func (r *cancelReporter) Errorf(format string, args ...interface{}) { + r.t.Errorf(format, args...) +} +func (r *cancelReporter) Fatalf(format string, args ...interface{}) { + defer r.cancel() + r.t.Fatalf(format, args...) +} + +func (r *cancelReporter) Helper() { + r.t.Helper() +} + +// WithContext returns a new Controller and a Context, which is cancelled on any +// fatal failure. 
+func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { + h, ok := t.(TestHelper) + if !ok { + h = &nopTestHelper{t: t} + } + + ctx, cancel := context.WithCancel(ctx) + return NewController(&cancelReporter{t: h, cancel: cancel}), ctx +} + +type nopTestHelper struct { + t TestReporter +} + +func (h *nopTestHelper) Errorf(format string, args ...interface{}) { + h.t.Errorf(format, args...) +} +func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { + h.t.Fatalf(format, args...) +} + +func (h nopTestHelper) Helper() {} + +// RecordCall is called by a mock. It should not be called by user code. +func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { + ctrl.T.Helper() + + recv := reflect.ValueOf(receiver) + for i := 0; i < recv.Type().NumMethod(); i++ { + if recv.Type().Method(i).Name == method { + return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...) + } + } + ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver) + panic("unreachable") +} + +// RecordCallWithMethodType is called by a mock. It should not be called by user code. +func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { + ctrl.T.Helper() + + call := newCall(ctrl.T, receiver, method, methodType, args...) + + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + ctrl.expectedCalls.Add(call) + + return call +} + +// Call is called by a mock. It should not be called by user code. +func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { + ctrl.T.Helper() + + // Nest this code so we can use defer to make sure the lock is released. 
+ actions := func() []func([]interface{}) []interface{} { + ctrl.T.Helper() + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) + if err != nil { + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. + origin := callerInfo(3) + ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) + } + + // Two things happen here: + // * the matching call no longer needs to check prerequite calls, + // * and the prerequite calls are no longer expected, so remove them. + preReqCalls := expected.dropPrereqs() + for _, preReqCall := range preReqCalls { + ctrl.expectedCalls.Remove(preReqCall) + } + + actions := expected.call() + if expected.exhausted() { + ctrl.expectedCalls.Remove(expected) + } + return actions + }() + + var rets []interface{} + for _, action := range actions { + if r := action(args); r != nil { + rets = r + } + } + + return rets +} + +// Finish checks to see if all the methods that were expected to be called +// were called. It should be invoked for each Controller. It is not idempotent +// and therefore can only be invoked once. +func (ctrl *Controller) Finish() { + // If we're currently panicking, probably because this is a deferred call. + // This must be recovered in the deferred function. + err := recover() + ctrl.finish(false, err) +} + +func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { + ctrl.T.Helper() + + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + if ctrl.finished { + if _, ok := isCleanuper(ctrl.T); !ok { + ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + } + return + } + ctrl.finished = true + + // Short-circuit, pass through the panic. 
+ if panicErr != nil { + panic(panicErr) + } + + // Check that all remaining expected calls are satisfied. + failures := ctrl.expectedCalls.Failures() + for _, call := range failures { + ctrl.T.Errorf("missing call(s) to %v", call) + } + if len(failures) != 0 { + if !cleanup { + ctrl.T.Fatalf("aborting test due to missing call(s)") + return + } + ctrl.T.Errorf("aborting test due to missing call(s)") + } +} + +// callerInfo returns the file:line of the call site. skip is the number +// of stack frames to skip when reporting. 0 is callerInfo's call site. +func callerInfo(skip int) string { + if _, file, line, ok := runtime.Caller(skip + 1); ok { + return fmt.Sprintf("%s:%d", file, line) + } + return "unknown file" +} + +// isCleanuper checks it if t's base TestReporter has a Cleanup method. +func isCleanuper(t TestReporter) (cleanuper, bool) { + tr := unwrapTestReporter(t) + c, ok := tr.(cleanuper) + return c, ok +} + +// unwrapTestReporter unwraps TestReporter to the base implementation. +func unwrapTestReporter(t TestReporter) TestReporter { + tr := t + switch nt := t.(type) { + case *cancelReporter: + tr = nt.t + if h, check := tr.(*nopTestHelper); check { + tr = h.t + } + case *nopTestHelper: + tr = nt.t + default: + // not wrapped + } + return tr +} diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/github.com/golang/mock/gomock/matchers.go new file mode 100644 index 00000000000..770aba5a310 --- /dev/null +++ b/vendor/github.com/golang/mock/gomock/matchers.go @@ -0,0 +1,269 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "fmt" + "reflect" + "strings" +) + +// A Matcher is a representation of a class of values. +// It is used to represent the valid or expected arguments to a mocked method. +type Matcher interface { + // Matches returns whether x is a match. + Matches(x interface{}) bool + + // String describes what the matcher matches. + String() string +} + +// WantFormatter modifies the given Matcher's String() method to the given +// Stringer. This allows for control on how the "Want" is formatted when +// printing . +func WantFormatter(s fmt.Stringer, m Matcher) Matcher { + type matcher interface { + Matches(x interface{}) bool + } + + return struct { + matcher + fmt.Stringer + }{ + matcher: m, + Stringer: s, + } +} + +// StringerFunc type is an adapter to allow the use of ordinary functions as +// a Stringer. If f is a function with the appropriate signature, +// StringerFunc(f) is a Stringer that calls f. +type StringerFunc func() string + +// String implements fmt.Stringer. +func (f StringerFunc) String() string { + return f() +} + +// GotFormatter is used to better print failure messages. If a matcher +// implements GotFormatter, it will use the result from Got when printing +// the failure message. +type GotFormatter interface { + // Got is invoked with the received value. The result is used when + // printing the failure message. + Got(got interface{}) string +} + +// GotFormatterFunc type is an adapter to allow the use of ordinary +// functions as a GotFormatter. 
If f is a function with the appropriate +// signature, GotFormatterFunc(f) is a GotFormatter that calls f. +type GotFormatterFunc func(got interface{}) string + +// Got implements GotFormatter. +func (f GotFormatterFunc) Got(got interface{}) string { + return f(got) +} + +// GotFormatterAdapter attaches a GotFormatter to a Matcher. +func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher { + return struct { + GotFormatter + Matcher + }{ + GotFormatter: s, + Matcher: m, + } +} + +type anyMatcher struct{} + +func (anyMatcher) Matches(interface{}) bool { + return true +} + +func (anyMatcher) String() string { + return "is anything" +} + +type eqMatcher struct { + x interface{} +} + +func (e eqMatcher) Matches(x interface{}) bool { + // In case, some value is nil + if e.x == nil || x == nil { + return reflect.DeepEqual(e.x, x) + } + + // Check if types assignable and convert them to common type + x1Val := reflect.ValueOf(e.x) + x2Val := reflect.ValueOf(x) + + if x1Val.Type().AssignableTo(x2Val.Type()) { + x1ValConverted := x1Val.Convert(x2Val.Type()) + return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) + } + + return false +} + +func (e eqMatcher) String() string { + return fmt.Sprintf("is equal to %v", e.x) +} + +type nilMatcher struct{} + +func (nilMatcher) Matches(x interface{}) bool { + if x == nil { + return true + } + + v := reflect.ValueOf(x) + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice: + return v.IsNil() + } + + return false +} + +func (nilMatcher) String() string { + return "is nil" +} + +type notMatcher struct { + m Matcher +} + +func (n notMatcher) Matches(x interface{}) bool { + return !n.m.Matches(x) +} + +func (n notMatcher) String() string { + // TODO: Improve this if we add a NotString method to the Matcher interface. 
+ return "not(" + n.m.String() + ")" +} + +type assignableToTypeOfMatcher struct { + targetType reflect.Type +} + +func (m assignableToTypeOfMatcher) Matches(x interface{}) bool { + return reflect.TypeOf(x).AssignableTo(m.targetType) +} + +func (m assignableToTypeOfMatcher) String() string { + return "is assignable to " + m.targetType.Name() +} + +type allMatcher struct { + matchers []Matcher +} + +func (am allMatcher) Matches(x interface{}) bool { + for _, m := range am.matchers { + if !m.Matches(x) { + return false + } + } + return true +} + +func (am allMatcher) String() string { + ss := make([]string, 0, len(am.matchers)) + for _, matcher := range am.matchers { + ss = append(ss, matcher.String()) + } + return strings.Join(ss, "; ") +} + +type lenMatcher struct { + i int +} + +func (m lenMatcher) Matches(x interface{}) bool { + v := reflect.ValueOf(x) + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == m.i + default: + return false + } +} + +func (m lenMatcher) String() string { + return fmt.Sprintf("has length %d", m.i) +} + +// Constructors + +// All returns a composite Matcher that returns true if and only all of the +// matchers return true. +func All(ms ...Matcher) Matcher { return allMatcher{ms} } + +// Any returns a matcher that always matches. +func Any() Matcher { return anyMatcher{} } + +// Eq returns a matcher that matches on equality. +// +// Example usage: +// Eq(5).Matches(5) // returns true +// Eq(5).Matches(4) // returns false +func Eq(x interface{}) Matcher { return eqMatcher{x} } + +// Len returns a matcher that matches on length. This matcher returns false if +// is compared to a type that is not an array, chan, map, slice, or string. +func Len(i int) Matcher { + return lenMatcher{i} +} + +// Nil returns a matcher that matches if the received value is nil. 
+// +// Example usage: +// var x *bytes.Buffer +// Nil().Matches(x) // returns true +// x = &bytes.Buffer{} +// Nil().Matches(x) // returns false +func Nil() Matcher { return nilMatcher{} } + +// Not reverses the results of its given child matcher. +// +// Example usage: +// Not(Eq(5)).Matches(4) // returns true +// Not(Eq(5)).Matches(5) // returns false +func Not(x interface{}) Matcher { + if m, ok := x.(Matcher); ok { + return notMatcher{m} + } + return notMatcher{Eq(x)} +} + +// AssignableToTypeOf is a Matcher that matches if the parameter to the mock +// function is assignable to the type of the parameter to this function. +// +// Example usage: +// var s fmt.Stringer = &bytes.Buffer{} +// AssignableToTypeOf(s).Matches(time.Second) // returns true +// AssignableToTypeOf(s).Matches(99) // returns false +// +// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() +// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true +func AssignableToTypeOf(x interface{}) Matcher { + if xt, ok := x.(reflect.Type); ok { + return assignableToTypeOfMatcher{xt} + } + return assignableToTypeOfMatcher{reflect.TypeOf(x)} +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml deleted file mode 100644 index 24b80388f72..00000000000 --- a/vendor/github.com/hashicorp/go-multierror/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - -branches: - only: - - master - -script: env GO111MODULE=on make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index e92fa614cd6..71dd308ed81 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,10 +1,11 @@ # go-multierror -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go 
Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) -[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. @@ -24,7 +25,25 @@ for introspecting on error values. Install using `go get github.com/hashicorp/go-multierror`. Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. 
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` ## Usage diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go index 775b6e753e7..3e2589bfde0 100644 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -6,6 +6,8 @@ package multierror // If err is not a multierror.Error, then it will be turned into // one. If any of the errs are multierr.Error, they will be flattened // one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. func Append(err error, errs ...error) *Error { switch err := err.(type) { case *Error: diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod index 0afe8e6f9d6..141cc4ccb25 100644 --- a/vendor/github.com/hashicorp/go-multierror/go.mod +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -1,5 +1,5 @@ module github.com/hashicorp/go-multierror -go 1.14 +go 1.13 require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index d05dd926987..f5457432646 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -40,14 +40,17 @@ func (e *Error) GoString() string { return fmt.Sprintf("*%#v", *e) } -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. +// WrappedErrors returns the list of errors that this Error is wrapping. 
It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. // -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } return e.Errors } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 72887abe55b..c9d7eb41e3d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1773,6 +1773,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1f733398ee4..17f03312df1 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -680,7 +680,7 @@ const ( WTD_CHOICE_CERT = 5 WTD_STATEACTION_IGNORE = 0x00000000 - WTD_STATEACTION_VERIFY = 0x00000010 + WTD_STATEACTION_VERIFY = 0x00000001 WTD_STATEACTION_CLOSE = 0x00000002 WTD_STATEACTION_AUTO_CACHE = 0x00000003 WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004 diff --git a/vendor/modules.txt b/vendor/modules.txt index edf13eed72e..ef8c2e3085c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -92,7 +92,7 @@ github.com/deislabs/oras/pkg/auth/docker github.com/deislabs/oras/pkg/content github.com/deislabs/oras/pkg/context github.com/deislabs/oras/pkg/oras -# github.com/devfile/api/v2 v2.0.0-20210211160219-33a78aec06af +# github.com/devfile/api/v2 
v2.0.0-20211021164004-dabee4e633ed ## explicit github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2 github.com/devfile/api/v2/pkg/attributes @@ -100,7 +100,8 @@ github.com/devfile/api/v2/pkg/devfile github.com/devfile/api/v2/pkg/utils/overriding github.com/devfile/api/v2/pkg/utils/unions github.com/devfile/api/v2/pkg/validation -# github.com/devfile/library v1.0.0-alpha.2 +github.com/devfile/api/v2/pkg/validation/variables +# github.com/devfile/library v1.2.1-0.20211104222135-49d635cb492f ## explicit github.com/devfile/library/pkg/devfile github.com/devfile/library/pkg/devfile/generator @@ -110,13 +111,17 @@ github.com/devfile/library/pkg/devfile/parser/data github.com/devfile/library/pkg/devfile/parser/data/v2 github.com/devfile/library/pkg/devfile/parser/data/v2/2.0.0 github.com/devfile/library/pkg/devfile/parser/data/v2/2.1.0 +github.com/devfile/library/pkg/devfile/parser/data/v2/2.2.0 github.com/devfile/library/pkg/devfile/parser/data/v2/common github.com/devfile/library/pkg/devfile/validate github.com/devfile/library/pkg/testingutil/filesystem github.com/devfile/library/pkg/util -# github.com/devfile/registry-support/index/generator v0.0.0-20210505173027-d06fe2bb3ee8 +# github.com/devfile/registry-support/index/generator v0.0.0-20211012185733-0a73f866043f ## explicit github.com/devfile/registry-support/index/generator/schema +# github.com/devfile/registry-support/registry-library v0.0.0-20211026200306-cab748834109 +## explicit +github.com/devfile/registry-support/registry-library/library # github.com/docker/cli v20.10.5+incompatible github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -179,7 +184,7 @@ github.com/exponent-io/jsonpath # github.com/fatih/color v1.10.0 github.com/fatih/color # github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 -github.com/ghodss/yaml +## explicit # github.com/go-errors/errors v1.0.1 github.com/go-errors/errors # github.com/go-logr/logr v0.4.0 @@ -208,6 +213,8 @@ 
github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/groupcache/lru +# github.com/golang/mock v1.5.0 +github.com/golang/mock/gomock # github.com/golang/protobuf v1.5.2 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes @@ -264,7 +271,7 @@ github.com/gregjones/httpcache github.com/gregjones/httpcache/diskcache # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap -# github.com/hashicorp/go-multierror v1.1.0 +# github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror # github.com/huandu/xstrings v1.3.1 github.com/huandu/xstrings @@ -321,7 +328,7 @@ github.com/modern-go/reflect2 github.com/monochromegane/go-gitignore # github.com/morikuni/aec v1.0.0 github.com/morikuni/aec -# github.com/onsi/gomega v1.13.0 +# github.com/onsi/gomega v1.14.0 github.com/onsi/gomega/gstruct/errors github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 @@ -464,7 +471,7 @@ golang.org/x/oauth2/internal # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 +# golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 @@ -663,7 +670,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.21.1 +# k8s.io/apiextensions-apiserver v0.21.3 ## explicit k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -724,7 +731,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.21.1 +# k8s.io/apiserver v0.21.3 
k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/endpoints/deprecation # k8s.io/cli-runtime v0.21.0 @@ -914,7 +921,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.21.1 +# k8s.io/component-base v0.21.3 k8s.io/component-base/version # k8s.io/klog v1.0.0 ## explicit @@ -934,13 +941,13 @@ k8s.io/kubectl/pkg/util/openapi/validation k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 +# k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 k8s.io/utils/buffer k8s.io/utils/exec k8s.io/utils/integer k8s.io/utils/pointer k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.9.0 +# sigs.k8s.io/controller-runtime v0.9.5 ## explicit sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index bb66a6dfdda..2611a20c643 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -80,7 +80,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // (unstructured, partial, etc) // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds - _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort _, isPartialList := obj.(*metav1.PartialObjectMetadataList) if isPartial || isPartialList { // we require that the GVK be populated in order to recognize the object @@ -134,7 +134,7 @@ func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ return serializer } -//createRestConfig copies the base config and updates needed 
fields for a new rest config +// createRestConfig copies the base config and updates needed fields for a new rest config. func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { gv := gvk.GroupVersion() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go index 5e9a7b5f53d..56a00371ff8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -41,7 +41,7 @@ type dynamicRESTMapper struct { initOnce sync.Once } -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. type DynamicRESTMapperOption func(*dynamicRESTMapper) error // WithLimiter sets the RESTMapper's underlying limiter to lim. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 3444ab52b48..bbe36c4673f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -49,7 +49,7 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client +// Options are creation options for a Client. type Options struct { // Scheme, if provided, will be used to map go structs to GroupVersionKinds Scheme *runtime.Scheme @@ -178,7 +178,7 @@ func (c *client) RESTMapper() meta.RESTMapper { return c.mapper } -// Create implements client.Client +// Create implements client.Client. 
func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -190,7 +190,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e } } -// Update implements client.Client +// Update implements client.Client. func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -203,7 +203,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e } } -// Delete implements client.Client +// Delete implements client.Client. func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -215,7 +215,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e } } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -227,7 +227,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO } } -// Patch implements client.Client +// Patch implements client.Client. func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -240,7 +240,7 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat } } -// Get implements client.Client +// Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { switch obj.(type) { case *unstructured.Unstructured: @@ -254,7 +254,7 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { } } -// List implements client.Client +// List implements client.Client. 
func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { switch x := obj.(type) { case *unstructured.UnstructuredList: @@ -288,20 +288,20 @@ func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) e } } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (c *client) Status() StatusWriter { return &statusWriter{client: c} } -// statusWriter is client.StatusWriter that writes status subresource +// statusWriter is client.StatusWriter that writes status subresource. type statusWriter struct { client *client } -// ensure statusWriter implements client.StatusWriter +// ensure statusWriter implements client.StatusWriter. var _ StatusWriter = &statusWriter{} -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -314,7 +314,7 @@ func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOp } } -// Patch implements client.Client +// Patch implements client.Client. func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go index b3493cb025f..857a0b38a72 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types +// clientCache creates and caches rest clients and metadata for Kubernetes types. 
type clientCache struct { // config is the rest.Config to talk to an apiserver config *rest.Config @@ -107,7 +107,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { return r, err } -// getObjMeta returns objMeta containing both type and object metadata and state +// getObjMeta returns objMeta containing both type and object metadata and state. func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { @@ -130,17 +130,17 @@ type resourceMeta struct { mapping *meta.RESTMapping } -// isNamespaced returns true if the type is namespaced +// isNamespaced returns true if the type is namespaced. func (r *resourceMeta) isNamespaced() bool { return r.mapping.Scope.Name() != meta.RESTScopeNameRoot } -// resource returns the resource name of the type +// resource returns the resource name of the type. func (r *resourceMeta) resource() string { return r.mapping.Resource.Resource } -// objMeta stores type and object information about a Kubernetes type +// objMeta stores type and object information about a Kubernetes type. type objMeta struct { // resourceMeta contains type information for the object *resourceMeta diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 67e80e0551d..ea25ea25308 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -46,47 +46,47 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } -// Create implements client.Client +// Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) } -// Update implements client.Client +// Update implements client.Client. 
func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return c.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Delete implements client.Client +// Delete implements client.Client. func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.Client +// Patch implements client.Client. func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } -// Get implements client.Client +// Get implements client.Client. func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { return c.client.Get(ctx, key, obj) } -// List implements client.Client +// List implements client.Client. func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { return c.client.List(ctx, obj, opts...) } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (c *dryRunClient) Status() StatusWriter { return &dryRunStatusWriter{client: c.client.Status()} } -// ensure dryRunStatusWriter implements client.StatusWriter +// ensure dryRunStatusWriter implements client.StatusWriter. var _ StatusWriter = &dryRunStatusWriter{} // dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode @@ -95,12 +95,12 @@ type dryRunStatusWriter struct { client StatusWriter } -// Update implements client.StatusWriter +// Update implements client.StatusWriter. 
func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.StatusWriter +// Patch implements client.StatusWriter. func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 0dfea4d6c57..58c2ece15b1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -30,7 +30,7 @@ import ( // ObjectKey identifies a Kubernetes Object. type ObjectKey = types.NamespacedName -// ObjectKeyFromObject returns the ObjectKey given a runtime.Object +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. func ObjectKeyFromObject(obj Object) ObjectKey { return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go index c0fc72c5b72..59747463a4a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -49,7 +49,7 @@ func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns s return mc.client.Resource(mapping.Resource).Namespace(ns), nil } -// Delete implements client.Client +// Delete implements client.Client. 
func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -67,7 +67,7 @@ func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...Delete return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -85,7 +85,7 @@ func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...D return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) } -// Patch implements client.Client +// Patch implements client.Client. func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -115,7 +115,7 @@ func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, op return nil } -// Get implements client.Client +// Get implements client.Client. func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -138,7 +138,7 @@ func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) er return nil } -// List implements client.Client +// List implements client.Client. 
func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadataList) if !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index cedcfb59614..d73cc5135a9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -86,7 +86,7 @@ func isNamespaced(c Client, obj runtime.Object) (bool, error) { scope := restmapping.Scope.Name() if scope == "" { - return false, errors.New("Scope cannot be identified. Empty scope returned") + return false, errors.New("scope cannot be identified, empty scope returned") } if scope != meta.RESTScopeNameRoot { @@ -95,7 +95,7 @@ func isNamespaced(c Client, obj runtime.Object) (bool, error) { return false, nil } -// Create implements clinet.Client +// Create implements clinet.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -104,7 +104,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -113,7 +113,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat return n.client.Create(ctx, obj, opts...) } -// Update implements client.Client +// Update implements client.Client. 
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -122,7 +122,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -131,7 +131,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat return n.client.Update(ctx, obj, opts...) } -// Delete implements client.Client +// Delete implements client.Client. func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -140,7 +140,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -149,7 +149,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet return n.client.Delete(ctx, obj, opts...) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. 
func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -162,7 +162,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... return n.client.DeleteAllOf(ctx, obj, opts...) } -// Patch implements client.Client +// Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -171,7 +171,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -180,7 +180,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o return n.client.Patch(ctx, obj, patch, opts...) } -// Get implements client.Client +// Get implements client.Client. 
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -188,14 +188,14 @@ func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) e } if isNamespaceScoped { if key.Namespace != "" && key.Namespace != n.namespace { - return fmt.Errorf("Namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) } key.Namespace = n.namespace } return n.client.Get(ctx, key, obj) } -// List implements client.Client +// List implements client.Client. func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { if n.namespace != "" { opts = append(opts, InNamespace(n.namespace)) @@ -203,12 +203,12 @@ func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...Lis return n.client.List(ctx, obj, opts...) } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (n *namespacedClient) Status() StatusWriter { return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} } -// ensure namespacedClientStatusWriter implements client.StatusWriter +// ensure namespacedClientStatusWriter implements client.StatusWriter. var _ StatusWriter = &namespacedClientStatusWriter{} type namespacedClientStatusWriter struct { @@ -217,7 +217,7 @@ type namespacedClientStatusWriter struct { namespacedclient Client } -// Update implements client.StatusWriter +// Update implements client.StatusWriter. 
func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) if err != nil { @@ -226,7 +226,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, objectNamespace := obj.GetNamespace() if objectNamespace != nsw.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -235,7 +235,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, return nsw.StatusClient.Update(ctx, obj, opts...) } -// Patch implements client.StatusWriter +// Patch implements client.StatusWriter. func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) if err != nil { @@ -244,7 +244,7 @@ func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, objectNamespace := obj.GetNamespace() if objectNamespace != nsw.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) } if isNamespaceScoped && objectNamespace == "" { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index f2532764667..aa2299eac05 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ 
b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -158,7 +158,7 @@ func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { return o } -// ApplyToCreate implements CreateOption +// ApplyToCreate implements CreateOption. func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { if o.DryRun != nil { co.DryRun = o.DryRun @@ -239,7 +239,7 @@ func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { var _ DeleteOption = &DeleteOptions{} -// ApplyToDelete implements DeleteOption +// ApplyToDelete implements DeleteOption. func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { if o.GracePeriodSeconds != nil { do.GracePeriodSeconds = o.GracePeriodSeconds @@ -349,7 +349,7 @@ type ListOptions struct { var _ ListOption = &ListOptions{} -// ApplyToList implements ListOption for ListOptions +// ApplyToList implements ListOption for ListOptions. func (o *ListOptions) ApplyToList(lo *ListOptions) { if o.LabelSelector != nil { lo.LabelSelector = o.LabelSelector @@ -569,7 +569,7 @@ func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { var _ UpdateOption = &UpdateOptions{} -// ApplyToUpdate implements UpdateOption +// ApplyToUpdate implements UpdateOption. func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { if o.DryRun != nil { uo.DryRun = o.DryRun @@ -636,7 +636,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { var _ PatchOption = &PatchOptions{} -// ApplyToPatch implements PatchOptions +// ApplyToPatch implements PatchOptions. func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { if o.DryRun != nil { po.DryRun = o.DryRun @@ -688,7 +688,7 @@ func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOf var _ DeleteAllOfOption = &DeleteAllOfOptions{} -// ApplyToDeleteAllOf implements DeleteAllOfOption +// ApplyToDeleteAllOf implements DeleteAllOfOption. 
func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { o.ApplyToList(&do.ListOptions) o.ApplyToDelete(&do.DeleteOptions) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index a1b32653ca6..dde7b21f258 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -33,7 +33,7 @@ type typedClient struct { paramCodec runtime.ParameterCodec } -// Create implements client.Client +// Create implements client.Client. func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -51,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti Into(obj) } -// Update implements client.Client +// Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -70,7 +70,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti Into(obj) } -// Delete implements client.Client +// Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -89,7 +89,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti Error() } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -108,7 +108,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet Error() } -// Patch implements client.Client +// Patch implements client.Client. 
func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -131,7 +131,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . Into(obj) } -// Get implements client.Client +// Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { r, err := c.cache.getResource(obj) if err != nil { @@ -143,7 +143,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error Name(key.Name).Do(ctx).Into(obj) } -// List implements client.Client +// List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { r, err := c.cache.getResource(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index f8fb3ccec18..dcf15be275a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -36,7 +36,7 @@ type unstructuredClient struct { paramCodec runtime.ParameterCodec } -// Create implements client.Client +// Create implements client.Client. func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -64,7 +64,7 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr return result } -// Update implements client.Client +// Update implements client.Client. 
func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -93,7 +93,7 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up return result } -// Delete implements client.Client +// Delete implements client.Client. func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -116,7 +116,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De Error() } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -139,7 +139,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts Error() } -// Patch implements client.Client +// Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -167,7 +167,7 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch Into(obj) } -// Get implements client.Client +// Get implements client.Client. func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -193,7 +193,7 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object return result } -// List implements client.Client +// List implements client.Client. 
func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { u, ok := obj.(*unstructured.UnstructuredList) if !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index ed18ae6d116..bbd9c9c7563 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromis return res } -// WithValues provides a new Logger with the tags appended +// WithValues provides a new Logger with the tags appended. func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, @@ -74,7 +74,7 @@ func (p *loggerPromise) V(l *DelegatingLogger, level int) *loggerPromise { return res } -// Fulfill instantiates the Logger with the provided logger +// Fulfill instantiates the Logger with the provided logger. func (p *loggerPromise) Fulfill(parentLogger logr.Logger) { var logger = parentLogger if p.name != nil { @@ -163,7 +163,7 @@ func (l *DelegatingLogger) V(level int) logr.Logger { return res } -// WithName provides a new Logger with the name appended +// WithName provides a new Logger with the name appended. func (l *DelegatingLogger) WithName(name string) logr.Logger { l.lock.RLock() defer l.lock.RUnlock() @@ -179,7 +179,7 @@ func (l *DelegatingLogger) WithName(name string) logr.Logger { return res } -// WithValues provides a new Logger with the tags appended +// WithValues provides a new Logger with the tags appended. 
func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger { l.lock.RLock() defer l.lock.RUnlock() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go index 4c56f3427bb..09a5a02eb64 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -29,32 +29,32 @@ type NullLogger struct{} var _ logr.Logger = NullLogger{} -// Info implements logr.InfoLogger +// Info implements logr.InfoLogger. func (NullLogger) Info(_ string, _ ...interface{}) { // Do nothing. } -// Enabled implements logr.InfoLogger +// Enabled implements logr.InfoLogger. func (NullLogger) Enabled() bool { return false } -// Error implements logr.Logger +// Error implements logr.Logger. func (NullLogger) Error(_ error, _ string, _ ...interface{}) { // Do nothing. } -// V implements logr.Logger -func (log NullLogger) V(_ int) logr.InfoLogger { +// V implements logr.Logger. +func (log NullLogger) V(_ int) logr.Logger { return log } -// WithName implements logr.Logger +// WithName implements logr.Logger. func (log NullLogger) WithName(_ string) logr.Logger { return log } -// WithValues implements logr.Logger +// WithValues implements logr.Logger. func (log NullLogger) WithValues(_ ...interface{}) logr.Logger { return log } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go index d4ea12cebf8..3012fdd411e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -23,7 +23,7 @@ import ( ) // KubeAPIWarningLoggerOptions controls the behavior -// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger() +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). 
type KubeAPIWarningLoggerOptions struct { // Deduplicate indicates a given warning message should only be written once. // Setting this to true in a long-running process handling many warnings can