diff --git a/apis/go.mod b/apis/go.mod index 8deb5f4524c..f9c3928b596 100644 --- a/apis/go.mod +++ b/apis/go.mod @@ -4,6 +4,7 @@ go 1.18 require ( github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 + github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 k8s.io/api v0.24.1 k8s.io/apimachinery v0.24.1 ) diff --git a/apis/go.sum b/apis/go.sum index f3291b13514..7cd56d0b7ed 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -6,6 +6,9 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= @@ -20,6 +23,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -36,8 +40,11 @@ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -65,11 +72,15 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof 
v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -85,6 +96,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -98,16 +110,23 @@ 
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 h1:bkBOsI/Yd+cBT+/aXkbbNo+imvq4VKRusoCluIGOBBg= github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= 
+github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -126,6 +145,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -139,6 +159,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -153,7 +174,11 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -171,14 +196,19 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -187,6 +217,7 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -199,8 +230,11 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -251,27 +285,37 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod 
h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +sigs.k8s.io/json 
v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/apis/hive/v1/clusterdeployment_types.go b/apis/hive/v1/clusterdeployment_types.go index ec5431eef37..de10b119ed4 100644 --- a/apis/hive/v1/clusterdeployment_types.go +++ b/apis/hive/v1/clusterdeployment_types.go @@ -276,6 +276,10 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment. + // The Customization exists in the ClusterPool namespace. + // +optional + CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"` } // ClusterMetadata contains metadata information about the installed cluster. 
diff --git a/apis/hive/v1/clusterdeploymentcustomization_types.go b/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..8917e756c51 --- /dev/null +++ b/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,99 @@ +package v1 + +import ( + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // CustomizationApplyReasonSucceeded indicates that the customization + // worked properly on the last applied cluster deployment. + CustomizationApplyReasonSucceeded = "Succeeded" + // CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply + // customization patches on install-config. More details can be found in + // ApplySucceeded condition message. + CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax" + // CustomizationApplyReasonBrokenCloud indicates that cluster deployment provision has failed + // when using this customization. More details can be found in the ApplySucceeded condition message. + CustomizationApplyReasonBrokenCloud = "BrokenByCloud" + // CustomizationApplyReasonInstallationPending indicates that the customization patches have + // been successfully applied but provisioning is not completed yet. + CustomizationApplyReasonInstallationPending = "InstallationPending" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API.
+// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization. +type ClusterDeploymentCustomizationSpec struct { + // InstallConfigPatches is a list of patches to be applied to the install-config. + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// PatchEntity represent a json patch (RFC 6902) to be applied to the install-config +type PatchEntity struct { + // Op is the operation to perform: add, remove, replace, move, copy, test + // +required + Op string `json:"op"` + // Path is the json path to the value to be modified + // +required + Path string `json:"path"` + // From is the json path to copy or move the value from + // +optional + From string `json:"from,omitempty"` + // Value is the value to be used in the operation + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization. +type ClusterDeploymentCustomizationStatus struct { + // ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on. + // +optional + ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` + + // ClusterPoolRef is the name of the current cluster pool the CDC used at. + // +optional + ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"` + + // LastAppliedConfiguration contains the last applied patches to the install-config. + // The information will retain for reference in case the customization is updated. 
+ // +optional + LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"` + + // Conditions describes the state of the operator's reconciliation functionality. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +const ( + ApplySucceededCondition conditionsv1.ConditionType = "ApplySucceeded" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations. +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/apis/hive/v1/clusterpool_types.go b/apis/hive/v1/clusterpool_types.go index 71e394a212c..37efd25a558 100644 --- a/apis/hive/v1/clusterpool_types.go +++ b/apis/hive/v1/clusterpool_types.go @@ -92,6 +92,11 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. // +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory maintains a list of entries consumed by the ClusterPool + // to customize the default ClusterDeployment. + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` } type HibernationConfig struct { @@ -110,6 +115,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind is the Kind of the inventory entry. 
+// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization +type InventoryEntryKind string + +const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization" + +// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment. +type InventoryEntry struct { + // Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value. + // +kubebuilder:default=ClusterDeploymentCustomization + Kind InventoryEntryKind `json:"kind,omitempty"` + // Name is the name of the referenced resource. + // +required + Name string `json:"name,omitempty"` +} + // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool. type ClusterPoolClaimLifetime struct { // Default is the default lifetime of the claim when no lifetime is set on the claim itself. @@ -197,6 +218,17 @@ const ( // ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready) // ClusterDeployments in the pool match the current configuration of the ClusterPool. ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent" + // ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid + ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid" +) + +const ( + // InventoryReasonValid is used when all ClusterDeploymentCustomization are + // available and when used the ClusterDeployments are successfully installed. + InventoryReasonValid = "Valid" + // InventoryReasonInvalid is used when there is something wrong with ClusterDeploymentCustomization, for example + // patching issue, provisioning failure, missing, etc. 
+ InventoryReasonInvalid = "Invalid" ) // +genclient diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index 2f96e248910..ac12a798fc7 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -7,6 +7,7 @@ package v1 import ( configv1 "github.com/openshift/api/config/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" agent "github.com/openshift/hive/apis/hive/v1/agent" alibabacloud "github.com/openshift/hive/apis/hive/v1/alibabacloud" aws "github.com/openshift/hive/apis/hive/v1/aws" @@ -676,6 +677,121 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. +func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. +func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ClusterPoolRef != nil { + in, out := &in.ClusterPoolRef, &out.ClusterPoolRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]conditionsv1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. +func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1341,6 +1457,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.CustomizationRef != nil { + in, out := &in.CustomizationRef, &out.CustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1413,6 +1534,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(HibernationConfig) **out = **in } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2500,6 +2626,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2989,6 +3131,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. 
+func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in diff --git a/apis/vendor/github.com/openshift/custom-resource-status/LICENSE b/apis/vendor/github.com/openshift/custom-resource-status/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go new file mode 100644 index 00000000000..7f98c60a063 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -0,0 +1,114 @@ +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// The return value indicates if this resulted in any changes *other than* LastHeartbeatTime. +func SetStatusCondition(conditions *[]Condition, newCondition Condition) bool { + if conditions == nil { + conditions = &[]Condition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + newCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return true + } + + changed := updateCondition(existingCondition, newCondition) + existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + return changed +} + +// SetStatusConditionNoHearbeat sets the corresponding condition in conditions to newCondition +// without setting lastHeartbeatTime. 
+// The return value indicates if this resulted in any changes. +func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) bool { + if conditions == nil { + conditions = &[]Condition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return true + } + + return updateCondition(existingCondition, newCondition) +} + +// RemoveStatusCondition removes the corresponding conditionType from conditions. +func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) { + if conditions == nil { + return + } + newConditions := []Condition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +func updateCondition(existingCondition *Condition, newCondition Condition) bool { + changed := false + if existingCondition.Status != newCondition.Status { + changed = true + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + if existingCondition.Reason != newCondition.Reason { + changed = true + existingCondition.Reason = newCondition.Reason + } + if existingCondition.Message != newCondition.Message { + changed = true + existingCondition.Message = newCondition.Message + } + return changed +} + +// FindStatusCondition finds the conditionType in conditions. 
+func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue` +func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue) +} + +// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse` +func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse) +} + +// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown` +func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. 
+func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go new file mode 100644 index 00000000000..b657efeaa65 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1 provides version v1 of the types and functions necessary to +// manage and inspect a slice of conditions. It is opinionated in the +// condition types provided but leaves it to the user to define additional +// types as necessary. +package v1 diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go new file mode 100644 index 00000000000..950678fb970 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go @@ -0,0 +1,51 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Condition represents the state of the operator's +// reconciliation functionality. +// +k8s:deepcopy-gen=true +type Condition struct { + Type ConditionType `json:"type" description:"type of condition ie. 
Available|Progressing|Degraded."` + + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` + + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"` + + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"` +} + +// ConditionType is the state of the operator's reconciliation functionality. +type ConditionType string + +const ( + // ConditionAvailable indicates that the resources maintained by the operator, + // is functional and available in the cluster. + ConditionAvailable ConditionType = "Available" + + // ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the + // operator + ConditionProgressing ConditionType = "Progressing" + + // ConditionDegraded indicates that the resources maintained by the operator are not functioning completely. + // An example of a degraded state would be if not all pods in a deployment were running. + // It may still be available, but it is degraded + ConditionDegraded ConditionType = "Degraded" + + // ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade. + // When `False`, the resources maintained by the operator should not be upgraded and the + // message field should contain a human readable description of what the administrator should do to + // allow the operator to successfully update the resources maintained by the operator. 
+ ConditionUpgradeable ConditionType = "Upgradeable" +) diff --git a/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bbbbf863d13 --- /dev/null +++ b/apis/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go @@ -0,0 +1,23 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} diff --git a/apis/vendor/modules.txt b/apis/vendor/modules.txt index bff48677e9c..ef96cc2d404 100644 --- a/apis/vendor/modules.txt +++ b/apis/vendor/modules.txt @@ -21,6 +21,9 @@ github.com/modern-go/reflect2 # github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 ## explicit; go 1.16 github.com/openshift/api/config/v1 +# github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 +## explicit; go 1.12 +github.com/openshift/custom-resource-status/conditions/v1 # golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd ## explicit; go 1.17 golang.org/x/net/http/httpguts diff --git a/cmd/hiveadmission/main.go b/cmd/hiveadmission/main.go index 3c8c243f23f..45fe777814c 100644 --- a/cmd/hiveadmission/main.go +++ b/cmd/hiveadmission/main.go @@ -30,6 +30,7 @@ func main() { hivevalidatingwebhooks.NewMachinePoolValidatingAdmissionHook(decoder), 
hivevalidatingwebhooks.NewSyncSetValidatingAdmissionHook(decoder), hivevalidatingwebhooks.NewSelectorSyncSetValidatingAdmissionHook(decoder), + hivevalidatingwebhooks.NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder), ) } diff --git a/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml new file mode 100644 index 00000000000..776c1e1ef28 --- /dev/null +++ b/config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml @@ -0,0 +1,135 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io +spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired state + of ClusterDeploymentCustomization. 
+ properties: + installConfigPatches: + description: InstallConfigPatches is a list of patches to be applied + to the install-config. + items: + description: PatchEntity represent a json patch (RFC 6902) to be + applied to the install-config + properties: + from: + description: From is the json path to copy or move the value + from + type: string + op: + description: 'Op is the operation to perform: add, remove, replace, + move, copy, test' + type: string + path: + description: Path is the json path to the value to be modified + type: string + value: + description: Value is the value to be used in the operation + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization. + properties: + clusterDeploymentRef: + description: ClusterDeploymentRef is a reference to the cluster deployment + that this customization is applied on. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterPoolRef: + description: ClusterPoolRef is the name of the current cluster pool + the CDC used at. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: Conditions describes the state of the operator's reconciliation + functionality. + items: + description: Condition represents the state of the operator's reconciliation + functionality. 
+ properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ConditionType is the state of the operator's reconciliation + functionality. + type: string + required: + - status + - type + type: object + type: array + lastAppliedConfiguration: + description: LastAppliedConfiguration contains the last applied patches + to the install-config. The information will retain for reference + in case the customization is updated. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index 54445dd4adb..26ddaf87f10 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -187,6 +187,16 @@ spec: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: CustomizationRef is the ClusterPool Inventory claimed + customization for this ClusterDeployment. The Customization + exists in the ClusterPool namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. 
diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index afcc377be59..157e29b7e2f 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -152,6 +152,27 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: Inventory maintains a list of entries consumed by the + ClusterPool to customize the default ClusterDeployment. + items: + description: InventoryEntry maintains a reference to a custom resource + consumed by a clusterpool to customize the cluster deployment. + properties: + kind: + default: ClusterDeploymentCustomization + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is also + currently the only supported value. + enum: + - "" + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. 
+ type: string + type: object + type: array labels: additionalProperties: type: string diff --git a/config/rbac/hive_admin_role.yaml b/config/rbac/hive_admin_role.yaml index cc7a0b075f3..d5a5ea8a560 100644 --- a/config/rbac/hive_admin_role.yaml +++ b/config/rbac/hive_admin_role.yaml @@ -37,6 +37,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates @@ -51,6 +52,7 @@ rules: - hiveconfigs - selectorsyncsets - selectorsyncidentityproviders + - clusterdeploymentcustomizations verbs: - get - list diff --git a/config/rbac/hive_reader_role.yaml b/config/rbac/hive_reader_role.yaml index 4cc8fa37ee8..b5ae7a48885 100644 --- a/config/rbac/hive_reader_role.yaml +++ b/config/rbac/hive_reader_role.yaml @@ -37,6 +37,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates diff --git a/go.mod b/go.mod index 4718e434e4b..07afc28f21e 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/heptio/velero v1.0.0 github.com/jonboulle/clockwork v0.2.2 github.com/json-iterator/go v1.1.12 - github.com/krishicks/yaml-patch v0.0.10 + github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e github.com/miekg/dns v1.1.35 github.com/modern-go/reflect2 v1.0.2 github.com/onsi/ginkgo v1.16.5 @@ -38,6 +38,7 @@ require ( github.com/openshift/cluster-api-provider-ibmcloud v0.0.1-0.20220201105455-8014e5e894b0 github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519 github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 + github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 github.com/openshift/hive/apis v0.0.0 github.com/openshift/installer 
v0.9.0-master.0.20220711145509-cdb9627de7ef diff --git a/go.sum b/go.sum index 64c70818b04..f6409b9e2b2 100644 --- a/go.sum +++ b/go.sum @@ -827,8 +827,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= -github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= +github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e h1:Vzi98BTVyd/EHyrs3ZINMfeWg4u1dd6h07E0AHZVYIs= +github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kulti/thelper v0.4.0 h1:2Nx7XbdbE/BYZeoip2mURKUdtHQRuy6Ug+wR7K9ywNM= github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= @@ -1046,6 +1046,8 @@ github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850d github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519/go.mod h1:C7unCUThP8eqT4xQfbvg3oIDn2S9TYtb0wbBoH/SR2U= github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551 h1:nGa6igwzG7smZOACUsovgf9XG8vT96Zdyc4H6r2rqS0= github.com/openshift/cluster-autoscaler-operator v0.0.0-20211006175002-fe524080b551/go.mod h1:72ieWchfTx9U7UbQO47vhSXBoCi2IJGZhXoCezan4EM= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= +github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/openshift/generic-admission-server 
v1.14.1-0.20200903115324-4ddcdd976480 h1:y47BAJFepK8Xls1c+quIOyc46OXiT9LRiqGVjIaMlSA= github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480/go.mod h1:OAHL5WnZphlhVEf5fTdeGLvNwMu1B2zCWpmxJpCA35o= github.com/openshift/installer v0.9.0-master.0.20220711145509-cdb9627de7ef h1:y3d9tfJqoKLsUwOJHi3iPBYmJe4Ukj8n19SkUsZbVUA= @@ -1309,6 +1311,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= @@ -1470,6 +1473,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1531,9 +1535,11 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net 
v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1639,6 +1645,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1768,6 +1775,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 h1:hI3jKY4Hpf63ns040onEbB3dAkR/H/P83hw1TG8dD3Y= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1883,6 +1892,7 @@ k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= k8s.io/api v0.22.0-rc.0/go.mod h1:EUcKB6RvpW74HMRUSSNwpUzrIHBdGT1FeAvOV+txic0= k8s.io/api v0.22.0/go.mod h1:0AoXXqst47OI/L0oGKq9DG61dvGRPXs7X4/B7KyjBCU= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= @@ -1905,6 +1915,7 @@ k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCF k8s.io/apimachinery v0.22.0-rc.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery 
v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= @@ -1936,6 +1947,7 @@ k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6g k8s.io/code-generator v0.22.0-rc.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.22.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/code-generator v0.24.1 h1:zS+dvmUNaOcvsQ4faV9hXNjsKG9/pQaLnts1Wma4RM8= k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= @@ -1969,6 +1981,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= @@ -1981,6 +1995,8 @@ k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi 
v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kubectl v0.22.0/go.mod h1:eeuP92uZbVL2UnOaf0nj9OjtI0hi/nekHU+0isURer0= @@ -1997,6 +2013,7 @@ k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -2032,6 +2049,7 @@ sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aea sigs.k8s.io/controller-tools v0.6.3-0.20210916130746-94401651a6c3/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8= sigs.k8s.io/controller-tools v0.9.0 h1:b/vSEPpA8hiMiyzDfLbZdCn3hoAcy3/868OHhYtHY9w= sigs.k8s.io/controller-tools v0.9.0/go.mod h1:NUkn8FTV3Sad3wWpSK7dt/145qfuQ8CKJV6j4jHC5rM= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 
h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc= diff --git a/hack/app-sre/kustomization.yaml b/hack/app-sre/kustomization.yaml index 5941b588da1..9c3ff4d672f 100644 --- a/hack/app-sre/kustomization.yaml +++ b/hack/app-sre/kustomization.yaml @@ -29,6 +29,7 @@ resources: - ../../config/crds/hive.openshift.io_selectorsyncsets.yaml - ../../config/crds/hive.openshift.io_syncidentityproviders.yaml - ../../config/crds/hive.openshift.io_syncsets.yaml +- ../../config/crds/hive.openshift.io_clusterdeploymentcustomizations.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization # Use app-sre-supplied variables to pull the image for the current commit diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index b914587293c..8f85e17d0f9 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -251,6 +251,142 @@ objects: plural: '' conditions: [] storedVersions: [] +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.0 + creationTimestamp: null + name: clusterdeploymentcustomizations.hive.openshift.io + spec: + group: hive.openshift.io + names: + kind: ClusterDeploymentCustomization + listKind: ClusterDeploymentCustomizationList + plural: clusterdeploymentcustomizations + singular: clusterdeploymentcustomization + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterDeploymentCustomizationSpec defines the desired + state of ClusterDeploymentCustomization. + properties: + installConfigPatches: + description: InstallConfigPatches is a list of patches to be applied + to the install-config. + items: + description: PatchEntity represent a json patch (RFC 6902) to + be applied to the install-config + properties: + from: + description: From is the json path to copy or move the value + from + type: string + op: + description: 'Op is the operation to perform: add, remove, + replace, move, copy, test' + type: string + path: + description: Path is the json path to the value to be modified + type: string + value: + description: Value is the value to be used in the operation + type: string + required: + - op + - path + - value + type: object + type: array + type: object + status: + description: ClusterDeploymentCustomizationStatus defines the observed + state of ClusterDeploymentCustomization. + properties: + clusterDeploymentRef: + description: ClusterDeploymentRef is a reference to the cluster + deployment that this customization is applied on. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + clusterPoolRef: + description: ClusterPoolRef is the name of the current cluster pool + the CDC used at. 
+ properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: Conditions describes the state of the operator's reconciliation + functionality. + items: + description: Condition represents the state of the operator's + reconciliation functionality. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + description: ConditionType is the state of the operator's + reconciliation functionality. + type: string + required: + - status + - type + type: object + type: array + lastAppliedConfiguration: + description: LastAppliedConfiguration contains the last applied + patches to the install-config. The information will retain for + reference in case the customization is updated. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -442,6 +578,16 @@ objects: belonging to ClusterPools. format: date-time type: string + clusterDeploymentCustomization: + description: CustomizationRef is the ClusterPool Inventory claimed + customization for this ClusterDeployment. The Customization + exists in the ClusterPool namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object namespace: description: Namespace is the namespace where the ClusterPool resides. 
@@ -1967,6 +2113,28 @@ objects: TODO: Add other useful fields. apiVersion, kind, uid?' type: string type: object + inventory: + description: Inventory maintains a list of entries consumed by the + ClusterPool to customize the default ClusterDeployment. + items: + description: InventoryEntry maintains a reference to a custom + resource consumed by a clusterpool to customize the cluster + deployment. + properties: + kind: + default: ClusterDeploymentCustomization + description: Kind denotes the kind of the referenced resource. + The default is ClusterDeploymentCustomization, which is + also currently the only supported value. + enum: + - '' + - ClusterDeploymentCustomization + type: string + name: + description: Name is the name of the referenced resource. + type: string + type: object + type: array labels: additionalProperties: type: string diff --git a/hack/e2e-common.sh b/hack/e2e-common.sh index efb7b670959..087bf419e01 100755 --- a/hack/e2e-common.sh +++ b/hack/e2e-common.sh @@ -213,6 +213,8 @@ function capture_manifests() { oc get clusterstate -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterstate.yaml" || true oc get dnszone -A -o yaml &> "${ARTIFACT_DIR}/hive_dnszones.yaml" || true oc get machinepool -A -o yaml &> "${ARTIFACT_DIR}/hive_machinepools.yaml" || true + oc get clusterdeploymentcustomization -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterdeploymentcustomization.yaml" || true + oc get clusterpool -A -o yaml &> "${ARTIFACT_DIR}/hive_clusterpool.yaml" || true # Don't get the contents of the secrets, since they're sensitive; hopefully just listing them will be helpful. 
oc get secrets -A &> "${ARTIFACT_DIR}/secret_list.txt" || true } diff --git a/hack/e2e-pool-test.sh b/hack/e2e-pool-test.sh index d1cdadc4481..4a22053acc5 100755 --- a/hack/e2e-pool-test.sh +++ b/hack/e2e-pool-test.sh @@ -23,6 +23,25 @@ spec: EOF } +function create_customization() { + local is_name=$1 + local ns=$2 + local cname=$3 + echo "Creating ClusterDeploymentCustomization $is_name" + oc apply -f -<1 pool size we would not only diff --git a/hack/requirements.txt b/hack/requirements.txt index 38dbcfd7bda..38a508ea6f9 100644 --- a/hack/requirements.txt +++ b/hack/requirements.txt @@ -1,2 +1,3 @@ GitPython PyYAML>=6.0 +yq>=4.0 diff --git a/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..8191f80c1c9 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/hive/apis/hive/v1" + scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterDeploymentCustomizationsGetter has a method to return a ClusterDeploymentCustomizationInterface. +// A group's client should implement this interface. +type ClusterDeploymentCustomizationsGetter interface { + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface +} + +// ClusterDeploymentCustomizationInterface has methods to work with ClusterDeploymentCustomization resources. 
+type ClusterDeploymentCustomizationInterface interface { + Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (*v1.ClusterDeploymentCustomization, error) + Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) + UpdateStatus(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (*v1.ClusterDeploymentCustomization, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeploymentCustomization, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterDeploymentCustomizationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) + ClusterDeploymentCustomizationExpansion +} + +// clusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type clusterDeploymentCustomizations struct { + client rest.Interface + ns string +} + +// newClusterDeploymentCustomizations returns a ClusterDeploymentCustomizations +func newClusterDeploymentCustomizations(c *HiveV1Client, namespace string) *clusterDeploymentCustomizations { + return &clusterDeploymentCustomizations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. 
+func (c *clusterDeploymentCustomizations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *clusterDeploymentCustomizations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterDeploymentCustomizationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterDeploymentCustomizationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *clusterDeploymentCustomizations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. 
+func (c *clusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.CreateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Post(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *clusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(clusterDeploymentCustomization.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *clusterDeploymentCustomizations) UpdateStatus(ctx context.Context, clusterDeploymentCustomization *v1.ClusterDeploymentCustomization, opts metav1.UpdateOptions) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(clusterDeploymentCustomization.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterDeploymentCustomization). + Do(ctx). 
+ Into(result) + return +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *clusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *clusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeploymentCustomization, err error) { + result = &v1.ClusterDeploymentCustomization{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("clusterdeploymentcustomizations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go new file mode 100644 index 00000000000..72969122200 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_clusterdeploymentcustomization.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterDeploymentCustomizations implements ClusterDeploymentCustomizationInterface +type FakeClusterDeploymentCustomizations struct { + Fake *FakeHiveV1 + ns string +} + +var clusterdeploymentcustomizationsResource = schema.GroupVersionResource{Group: "hive.openshift.io", Version: "v1", Resource: "clusterdeploymentcustomizations"} + +var clusterdeploymentcustomizationsKind = schema.GroupVersionKind{Group: "hive.openshift.io", Version: "v1", Kind: "ClusterDeploymentCustomization"} + +// Get takes name of the clusterDeploymentCustomization, and returns the corresponding clusterDeploymentCustomization object, and an error if there is any. +func (c *FakeClusterDeploymentCustomizations) Get(ctx context.Context, name string, options v1.GetOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(clusterdeploymentcustomizationsResource, c.ns, name), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// List takes label and field selectors, and returns the list of ClusterDeploymentCustomizations that match those selectors. +func (c *FakeClusterDeploymentCustomizations) List(ctx context.Context, opts v1.ListOptions) (result *hivev1.ClusterDeploymentCustomizationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(clusterdeploymentcustomizationsResource, clusterdeploymentcustomizationsKind, c.ns, opts), &hivev1.ClusterDeploymentCustomizationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &hivev1.ClusterDeploymentCustomizationList{ListMeta: obj.(*hivev1.ClusterDeploymentCustomizationList).ListMeta} + for _, item := range obj.(*hivev1.ClusterDeploymentCustomizationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterDeploymentCustomizations. +func (c *FakeClusterDeploymentCustomizations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(clusterdeploymentcustomizationsResource, c.ns, opts)) + +} + +// Create takes the representation of a clusterDeploymentCustomization and creates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. +func (c *FakeClusterDeploymentCustomizations) Create(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.CreateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Update takes the representation of a clusterDeploymentCustomization and updates it. Returns the server's representation of the clusterDeploymentCustomization, and an error, if there is any. 
+func (c *FakeClusterDeploymentCustomizations) Update(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.UpdateOptions) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(clusterdeploymentcustomizationsResource, c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterDeploymentCustomizations) UpdateStatus(ctx context.Context, clusterDeploymentCustomization *hivev1.ClusterDeploymentCustomization, opts v1.UpdateOptions) (*hivev1.ClusterDeploymentCustomization, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(clusterdeploymentcustomizationsResource, "status", c.ns, clusterDeploymentCustomization), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} + +// Delete takes name of the clusterDeploymentCustomization and deletes it. Returns an error if one occurs. +func (c *FakeClusterDeploymentCustomizations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(clusterdeploymentcustomizationsResource, c.ns, name, opts), &hivev1.ClusterDeploymentCustomization{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeClusterDeploymentCustomizations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(clusterdeploymentcustomizationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &hivev1.ClusterDeploymentCustomizationList{}) + return err +} + +// Patch applies the patch and returns the patched clusterDeploymentCustomization. +func (c *FakeClusterDeploymentCustomizations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *hivev1.ClusterDeploymentCustomization, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(clusterdeploymentcustomizationsResource, c.ns, name, pt, data, subresources...), &hivev1.ClusterDeploymentCustomization{}) + + if obj == nil { + return nil, err + } + return obj.(*hivev1.ClusterDeploymentCustomization), err +} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go index d52a518c108..06f12520fe7 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/fake/fake_hive_client.go @@ -24,6 +24,10 @@ func (c *FakeHiveV1) ClusterDeployments(namespace string) v1.ClusterDeploymentIn return &FakeClusterDeployments{c, namespace} } +func (c *FakeHiveV1) ClusterDeploymentCustomizations(namespace string) v1.ClusterDeploymentCustomizationInterface { + return &FakeClusterDeploymentCustomizations{c, namespace} +} + func (c *FakeHiveV1) ClusterDeprovisions(namespace string) v1.ClusterDeprovisionInterface { return &FakeClusterDeprovisions{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go index 600401a271d..951ab87652b 100644 --- 
a/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go @@ -8,6 +8,8 @@ type ClusterClaimExpansion interface{} type ClusterDeploymentExpansion interface{} +type ClusterDeploymentCustomizationExpansion interface{} + type ClusterDeprovisionExpansion interface{} type ClusterImageSetExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go index 0652d984747..e0ea9fe9f1a 100644 --- a/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go +++ b/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go @@ -15,6 +15,7 @@ type HiveV1Interface interface { CheckpointsGetter ClusterClaimsGetter ClusterDeploymentsGetter + ClusterDeploymentCustomizationsGetter ClusterDeprovisionsGetter ClusterImageSetsGetter ClusterPoolsGetter @@ -48,6 +49,10 @@ func (c *HiveV1Client) ClusterDeployments(namespace string) ClusterDeploymentInt return newClusterDeployments(c, namespace) } +func (c *HiveV1Client) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationInterface { + return newClusterDeploymentCustomizations(c, namespace) +} + func (c *HiveV1Client) ClusterDeprovisions(namespace string) ClusterDeprovisionInterface { return newClusterDeprovisions(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 71f7068525e..296f8270454 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -44,6 +44,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterClaims().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeployments"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Hive().V1().ClusterDeployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusterdeploymentcustomizations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeploymentCustomizations().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterdeprovisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hive().V1().ClusterDeprovisions().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterimagesets"): diff --git a/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..637b3b707f7 --- /dev/null +++ b/pkg/client/informers/externalversions/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + hivev1 "github.com/openshift/hive/apis/hive/v1" + versioned "github.com/openshift/hive/pkg/client/clientset/versioned" + internalinterfaces "github.com/openshift/hive/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/hive/pkg/client/listers/hive/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationInformer provides access to a shared informer and lister for +// ClusterDeploymentCustomizations. 
+type ClusterDeploymentCustomizationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterDeploymentCustomizationLister +} + +type clusterDeploymentCustomizationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterDeploymentCustomizationInformer constructs a new informer for ClusterDeploymentCustomization type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterDeploymentCustomizationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HiveV1().ClusterDeploymentCustomizations(namespace).Watch(context.TODO(), options) + }, + }, + &hivev1.ClusterDeploymentCustomization{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterDeploymentCustomizationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterDeploymentCustomizationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterDeploymentCustomizationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&hivev1.ClusterDeploymentCustomization{}, f.defaultInformer) +} + +func (f *clusterDeploymentCustomizationInformer) Lister() v1.ClusterDeploymentCustomizationLister { + return v1.NewClusterDeploymentCustomizationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/hive/v1/interface.go b/pkg/client/informers/externalversions/hive/v1/interface.go index d73a5da9ab5..0659201eae6 100644 --- a/pkg/client/informers/externalversions/hive/v1/interface.go +++ b/pkg/client/informers/externalversions/hive/v1/interface.go @@ -14,6 +14,8 @@ type Interface interface { ClusterClaims() ClusterClaimInformer // ClusterDeployments returns a 
ClusterDeploymentInformer. ClusterDeployments() ClusterDeploymentInformer + // ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. + ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer // ClusterDeprovisions returns a ClusterDeprovisionInformer. ClusterDeprovisions() ClusterDeprovisionInformer // ClusterImageSets returns a ClusterImageSetInformer. @@ -70,6 +72,11 @@ func (v *version) ClusterDeployments() ClusterDeploymentInformer { return &clusterDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// ClusterDeploymentCustomizations returns a ClusterDeploymentCustomizationInformer. +func (v *version) ClusterDeploymentCustomizations() ClusterDeploymentCustomizationInformer { + return &clusterDeploymentCustomizationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // ClusterDeprovisions returns a ClusterDeprovisionInformer. func (v *version) ClusterDeprovisions() ClusterDeprovisionInformer { return &clusterDeprovisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..dc1ff4923ad --- /dev/null +++ b/pkg/client/listers/hive/v1/clusterdeploymentcustomization.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/hive/apis/hive/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterDeploymentCustomizationLister helps list ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. +type ClusterDeploymentCustomizationLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. + ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister + ClusterDeploymentCustomizationListerExpansion +} + +// clusterDeploymentCustomizationLister implements the ClusterDeploymentCustomizationLister interface. +type clusterDeploymentCustomizationLister struct { + indexer cache.Indexer +} + +// NewClusterDeploymentCustomizationLister returns a new ClusterDeploymentCustomizationLister. +func NewClusterDeploymentCustomizationLister(indexer cache.Indexer) ClusterDeploymentCustomizationLister { + return &clusterDeploymentCustomizationLister{indexer: indexer} +} + +// List lists all ClusterDeploymentCustomizations in the indexer. +func (s *clusterDeploymentCustomizationLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// ClusterDeploymentCustomizations returns an object that can list and get ClusterDeploymentCustomizations. +func (s *clusterDeploymentCustomizationLister) ClusterDeploymentCustomizations(namespace string) ClusterDeploymentCustomizationNamespaceLister { + return clusterDeploymentCustomizationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ClusterDeploymentCustomizationNamespaceLister helps list and get ClusterDeploymentCustomizations. +// All objects returned here must be treated as read-only. +type ClusterDeploymentCustomizationNamespaceLister interface { + // List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) + // Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ClusterDeploymentCustomization, error) + ClusterDeploymentCustomizationNamespaceListerExpansion +} + +// clusterDeploymentCustomizationNamespaceLister implements the ClusterDeploymentCustomizationNamespaceLister +// interface. +type clusterDeploymentCustomizationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ClusterDeploymentCustomizations in the indexer for a given namespace. +func (s clusterDeploymentCustomizationNamespaceLister) List(selector labels.Selector) (ret []*v1.ClusterDeploymentCustomization, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterDeploymentCustomization)) + }) + return ret, err +} + +// Get retrieves the ClusterDeploymentCustomization from the indexer for a given namespace and name. +func (s clusterDeploymentCustomizationNamespaceLister) Get(name string) (*v1.ClusterDeploymentCustomization, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterdeploymentcustomization"), name) + } + return obj.(*v1.ClusterDeploymentCustomization), nil +} diff --git a/pkg/client/listers/hive/v1/expansion_generated.go b/pkg/client/listers/hive/v1/expansion_generated.go index 2f913b5fa55..b07f9b98377 100644 --- a/pkg/client/listers/hive/v1/expansion_generated.go +++ b/pkg/client/listers/hive/v1/expansion_generated.go @@ -26,6 +26,14 @@ type ClusterDeploymentListerExpansion interface{} // ClusterDeploymentNamespaceLister. 
type ClusterDeploymentNamespaceListerExpansion interface{} +// ClusterDeploymentCustomizationListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationLister. +type ClusterDeploymentCustomizationListerExpansion interface{} + +// ClusterDeploymentCustomizationNamespaceListerExpansion allows custom methods to be added to +// ClusterDeploymentCustomizationNamespaceLister. +type ClusterDeploymentCustomizationNamespaceListerExpansion interface{} + // ClusterDeprovisionListerExpansion allows custom methods to be added to // ClusterDeprovisionLister. type ClusterDeprovisionListerExpansion interface{} diff --git a/pkg/clusterresource/openstack.go b/pkg/clusterresource/openstack.go index ee6be5fa338..f0cbc53d39d 100644 --- a/pkg/clusterresource/openstack.go +++ b/pkg/clusterresource/openstack.go @@ -43,6 +43,13 @@ type OpenStackCloudBuilder struct { MasterFlavor string } +func NewOpenStackCloudBuilderFromSecret(credsSecret *corev1.Secret) *OpenStackCloudBuilder { + cloudsYamlContent := credsSecret.Data[constants.OpenStackCredentialsName] + return &OpenStackCloudBuilder{ + CloudsYAMLContent: cloudsYamlContent, + } +} + func (p *OpenStackCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/clusterresource/ovirt.go b/pkg/clusterresource/ovirt.go index db55536b533..e435f3da8a5 100644 --- a/pkg/clusterresource/ovirt.go +++ b/pkg/clusterresource/ovirt.go @@ -38,6 +38,13 @@ type OvirtCloudBuilder struct { CACert []byte } +func NewOvirtCloudBuilderFromSecret(credsSecret *corev1.Secret) *OvirtCloudBuilder { + ovirtConfigYamlContent := credsSecret.Data[constants.OvirtCredentialsName] + return &OvirtCloudBuilder{ + OvirtConfig: ovirtConfigYamlContent, + } +} + func (p *OvirtCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/clusterresource/vsphere.go b/pkg/clusterresource/vsphere.go index 
3b0c6cbf97a..fcf823aa07f 100644 --- a/pkg/clusterresource/vsphere.go +++ b/pkg/clusterresource/vsphere.go @@ -54,6 +54,17 @@ type VSphereCloudBuilder struct { CACert []byte } +func NewVSphereCloudBuilderFromSecret(credsSecret, certsSecret *corev1.Secret) *VSphereCloudBuilder { + username := credsSecret.Data[constants.UsernameSecretKey] + password := credsSecret.Data[constants.PasswordSecretKey] + cacert := certsSecret.Data[".cacert"] + return &VSphereCloudBuilder{ + Username: string(username), + Password: string(password), + CACert: cacert, + } +} + func (p *VSphereCloudBuilder) GenerateCredentialsSecret(o *Builder) *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 30c53149841..499d659fefc 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" librarygocontroller "github.com/openshift/library-go/pkg/controller" "github.com/openshift/library-go/pkg/manifest" "github.com/openshift/library-go/pkg/verify" @@ -1383,13 +1384,20 @@ func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.Clu return reconcile.Result{}, err } + if deprovisioned { + if err := r.releaseCustomization(cd, cdLog); err != nil { + cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error releasing inventory customization") + return reconcile.Result{}, err + } + } + switch { case !deprovisioned: return reconcile.Result{}, nil case !dnsZoneGone: return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil default: - cdLog.Infof("DNSZone gone and deprovision request completed, removing finalizer") + cdLog.Infof("DNSZone gone, 
customization gone and deprovision request completed, removing deprovision finalizer") if err := r.removeClusterDeploymentFinalizer(cd, cdLog); err != nil { cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer") return reconcile.Result{}, err @@ -1422,6 +1430,49 @@ func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1 return nil } +func (r *ReconcileClusterDeployment) releaseCustomization(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error { + cpRef := cd.Spec.ClusterPoolRef + if cpRef == nil || cpRef.CustomizationRef == nil { + return nil + } + + cdc := &hivev1.ClusterDeploymentCustomization{} + cdcNamespace := cpRef.Namespace + cdcName := cpRef.CustomizationRef.Name + cdcLog := cdLog.WithField("customization", cdcName).WithField("namespace", cdcNamespace) + err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: cdcNamespace, Name: cdcName}, cdc) + if err != nil { + if apierrors.IsNotFound(err) { + cdcLog.Info("customization not found, nothing to release") + return nil + } + cdcLog.WithError(err).Error("error reading customization") + return err + } + + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "available", + }) + + if cdc.Status.ClusterPoolRef != nil || cdc.Status.ClusterDeploymentRef != nil { + cdc.Status.ClusterPoolRef = nil + cdc.Status.ClusterDeploymentRef = nil + changed = true + } + + if changed { + if err := r.Status().Update(context.Background(), cdc); err != nil { + cdcLog.WithError(err).Error("failed to update ClusterDeploymentCustomizationAvailable condition") + return err + } + } + + return nil +} + // setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation // to when the dnszone became ready, and set a metric to report the delay. 
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered. diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go index d06115a8317..c9595d181e8 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/crypto/openpgp" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -119,6 +120,15 @@ func TestClusterDeploymentReconcile(t *testing.T) { return nil } + getCDC := func(c client.Client) *hivev1.ClusterDeploymentCustomization { + cdc := &hivev1.ClusterDeploymentCustomization{} + err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, cdc) + if err == nil { + return cdc + } + return nil + } + getDNSZone := func(c client.Client) *hivev1.DNSZone { zone := &hivev1.DNSZone{} err := c.Get(context.TODO(), client.ObjectKey{Name: testName + "-zone", Namespace: testNamespace}, zone) @@ -1874,6 +1884,36 @@ func TestClusterDeploymentReconcile(t *testing.T) { require.Nil(t, cd, "expected ClusterDeployment to be deleted") }, }, + { + name: "release customization on deprovision", + existing: []runtime.Object{ + testClusterDeploymentCustomization(testName), + func() *hivev1.ClusterDeployment { + cd := testClusterDeploymentWithInitializedConditions(testClusterDeployment()) + cd.Spec.Installed = true + cd.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{ + Namespace: testNamespace, + CustomizationRef: &corev1.LocalObjectReference{Name: testName}, + } + now := metav1.Now() + cd.DeletionTimestamp = &now + return cd + }(), + testclusterdeprovision.Build( + testclusterdeprovision.WithNamespace(testNamespace), + 
testclusterdeprovision.WithName(testName), + testclusterdeprovision.Completed(), + ), + }, + validate: func(c client.Client, t *testing.T) { + testassert.AssertCDCConditions(t, getCDC(c), []conditionsv1.Condition{{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "available", + }}) + }, + }, { name: "deprovision finished", existing: []runtime.Object{ @@ -3211,6 +3251,13 @@ func testClusterDeploymentWithInitializedConditions(cd *hivev1.ClusterDeployment return cd } +func testClusterDeploymentCustomization(name string) *hivev1.ClusterDeploymentCustomization { + cdc := &hivev1.ClusterDeploymentCustomization{} + cdc.Name = name + cdc.Namespace = testNamespace + return cdc +} + func testInstalledClusterDeployment(installedAt time.Time) *hivev1.ClusterDeployment { cd := testClusterDeployment() cd.Spec.Installed = true diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index e52374ed1e0..df1ad6d6c6e 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -2,11 +2,13 @@ package clusterpool import ( "context" + "encoding/json" "fmt" "math" "reflect" "sort" + yamlpatch "github.com/krishicks/yaml-patch" "github.com/pkg/errors" log "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -54,6 +56,7 @@ var ( hivev1.ClusterPoolMissingDependenciesCondition, hivev1.ClusterPoolCapacityAvailableCondition, hivev1.ClusterPoolAllClustersCurrentCondition, + hivev1.ClusterPoolInventoryValidCondition, } ) @@ -169,9 +172,54 @@ func AddToManager(mgr manager.Manager, r *ReconcileClusterPool, concurrentReconc return err } + // Watch for changes to ClusterDeploymentCustomizations + if err := c.Watch( + &source.Kind{Type: &hivev1.ClusterDeploymentCustomization{}}, + handler.EnqueueRequestsFromMapFunc( + requestsForCDCResources(r.Client, r.logger)), + ); err != nil { + 
return err + } + + return nil +} + +func requestsForCDCResources(c client.Client, logger log.FieldLogger) handler.MapFunc { + return func(o client.Object) []reconcile.Request { + cdc, ok := o.(*hivev1.ClusterDeploymentCustomization) + if !ok { + return nil + } + + cpList := &hivev1.ClusterPoolList{} + if err := c.List(context.Background(), cpList, client.InNamespace(o.GetNamespace())); err != nil { + logger.WithError(err).Log(controllerutils.LogLevel(err), "failed to list cluster pools for CDC resource") + return nil + } + + var requests []reconcile.Request + for _, cpl := range cpList.Items { + if cpl.Spec.Inventory == nil { + continue + } + for _, entry := range cpl.Spec.Inventory { + if entry.Name != cdc.Name { + continue + } + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: cpl.Namespace, + Name: cpl.Name, + }, + }) + break + } + } + + return requests + } +} + func requestsForCDRBACResources(c client.Client, resourceName string, logger log.FieldLogger) handler.MapFunc { return func(o client.Object) []reconcile.Request { if o.GetName() != resourceName { @@ -251,6 +299,10 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } + if p := clp.Spec.Platform; clp.Spec.RunningCount != clp.Spec.Size && (p.OpenStack != nil || p.Ovirt != nil || p.VSphere != nil) { + return reconcile.Result{}, errors.New("Hibernation is not supported on OpenStack, VSphere and Ovirt, unless runningCount==size") + } + // Initialize cluster pool conditions if not set newConditions, changed := controllerutils.InitializeClusterPoolConditions(clp.Status.Conditions, clusterPoolConditions) if changed { @@ -295,8 +347,16 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. 
return reconcile.Result{}, err } + cdcs, err := getAllCustomizationsForPool(r.Client, clp, logger) + if err != nil { + return reconcile.Result{}, err + } + claims.SyncClusterDeploymentAssignments(r.Client, cds, logger) cds.SyncClaimAssignments(r.Client, claims, logger) + if err := cdcs.SyncClusterDeploymentCustomizationAssignments(r.Client, clp, cds, logger); err != nil { + return reconcile.Result{}, err + } origStatus := clp.Status.DeepCopy() clp.Status.Size = int32(len(cds.Unassigned(true))) @@ -365,7 +425,10 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. // If too few, create new InstallConfig and ClusterDeployment. case drift < 0 && availableCapacity > 0: toAdd := minIntVarible(-drift, availableCapacity, availableCurrent) - if err := r.addClusters(clp, poolVersion, cds, toAdd, logger); err != nil { + if clp.Spec.Inventory != nil { + toAdd = minIntVarible(toAdd, len(cdcs.Unassigned())) + } + if err := r.addClusters(clp, poolVersion, cds, toAdd, cdcs, logger); err != nil { log.WithError(err).Error("error adding clusters") return reconcile.Result{}, err } @@ -480,6 +543,12 @@ func calculatePoolVersion(clp *hivev1.ClusterPool) string { ba = append(ba, deephash.Hash(clp.Spec.BaseDomain)...) ba = append(ba, deephash.Hash(clp.Spec.ImageSetRef)...) ba = append(ba, deephash.Hash(clp.Spec.InstallConfigSecretTemplateRef)...) + // Inventory changes the behavior of cluster pool, thus it needs to be in the pool version. + // But to avoid redeployment of clusters if inventory changes, a fixed string is added to pool version. + // https://github.com/openshift/hive/blob/master/docs/enhancements/clusterpool-inventory.md#pool-version + if clp.Spec.Inventory != nil { + ba = append(ba, []byte("hasInventory")...) 
+ } // Hash of hashes to ensure fixed length return fmt.Sprintf("%x", deephash.Hash(ba)) } @@ -597,6 +666,7 @@ func (r *ReconcileClusterPool) addClusters( poolVersion string, cds *cdCollection, newClusterCount int, + cdcs *cdcCollection, logger log.FieldLogger, ) error { logger.WithField("count", newClusterCount).Info("Adding new clusters") @@ -635,7 +705,7 @@ func (r *ReconcileClusterPool) addClusters( } for i := 0; i < newClusterCount; i++ { - cd, err := r.createCluster(clp, cloudBuilder, pullSecret, installConfigTemplate, poolVersion, logger) + cd, err := r.createCluster(clp, cloudBuilder, pullSecret, installConfigTemplate, poolVersion, cdcs, logger) if err != nil { return err } @@ -651,8 +721,11 @@ func (r *ReconcileClusterPool) createCluster( pullSecret string, installConfigTemplate string, poolVersion string, + cdcs *cdcCollection, logger log.FieldLogger, ) (*hivev1.ClusterDeployment, error) { + var err error + ns, err := r.createRandomNamespace(clp) if err != nil { logger.WithError(err).Error("error obtaining random namespace") @@ -696,18 +769,30 @@ func (r *ReconcileClusterPool) createCluster( poolKey := types.NamespacedName{Namespace: clp.Namespace, Name: clp.Name}.String() r.expectations.ExpectCreations(poolKey, 1) var cd *hivev1.ClusterDeployment + var secret *corev1.Secret + var cdPos int // Add the ClusterPoolRef to the ClusterDeployment, and move it to the end of the slice. 
for i, obj := range objs { - var ok bool - cd, ok = obj.(*hivev1.ClusterDeployment) - if !ok { - continue + if cdTmp, ok := obj.(*hivev1.ClusterDeployment); ok { + cd = cdTmp + cdPos = i + poolRef := poolReference(clp) + cd.Spec.ClusterPoolRef = &poolRef + if clp.Spec.Inventory != nil { + cd.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcs.unassigned[0].Name} + } + } else if secretTmp := isInstallConfigSecret(obj); secretTmp != nil { + secret = secretTmp } - poolRef := poolReference(clp) - cd.Spec.ClusterPoolRef = &poolRef - lastIndex := len(objs) - 1 - objs[i], objs[lastIndex] = objs[lastIndex], objs[i] } + + if err := r.patchInstallConfig(clp, cd, secret, cdcs, logger); err != nil { + return nil, err + } + + // Move the ClusterDeployment to the end of the slice + lastIndex := len(objs) - 1 + objs[cdPos], objs[lastIndex] = objs[lastIndex], objs[cdPos] // Create the resources. for _, obj := range objs { if err := r.Client.Create(context.Background(), obj.(client.Object)); err != nil { @@ -719,6 +804,69 @@ func (r *ReconcileClusterPool) createCluster( return cd, nil } +func isInstallConfigSecret(obj interface{}) *corev1.Secret { + if secret, ok := obj.(*corev1.Secret); ok { + _, ok := secret.StringData["install-config.yaml"] + if ok { + return secret + } + } + return nil +} + +// patchInstallConfig responsible for applying ClusterDeploymentCustomization and its reservation +func (r *ReconcileClusterPool) patchInstallConfig(clp *hivev1.ClusterPool, cd *hivev1.ClusterDeployment, secret *corev1.Secret, cdcs *cdcCollection, logger log.FieldLogger) error { + if clp.Spec.Inventory == nil { + return nil + } + if cd.Spec.ClusterPoolRef.CustomizationRef == nil { + return errors.New("missing customization") + } + + cdc := &hivev1.ClusterDeploymentCustomization{} + if err := r.Client.Get(context.Background(), client.ObjectKey{Namespace: clp.Namespace, Name: cd.Spec.ClusterPoolRef.CustomizationRef.Name}, cdc); err != nil { + if 
apierrors.IsNotFound(err) { + return errors.New("missing customization") + } + return err + } + + newPatch := yamlpatch.Patch{} + for _, patch := range cdc.Spec.InstallConfigPatches { + var value interface{} + value = patch.Value + newPatch = append(newPatch, yamlpatch.Operation{ + Op: yamlpatch.Op(patch.Op), + Path: yamlpatch.OpPath(patch.Path), + From: yamlpatch.OpPath(patch.From), + Value: yamlpatch.NewNode(&value), + }) + } + + installConfig, err := newPatch.Apply([]byte(secret.StringData["install-config.yaml"])) + if err != nil { + cdcs.BrokenBySyntax(r, cdc, fmt.Sprint(err)) + cdcs.UpdateInventoryValidCondition(r, clp) + return err + } + + configJson, err := json.Marshal(cdc.Spec) + if err != nil { + return err + } + + if err := cdcs.Reserve(r, cdc, cd.Name, clp.Name); err != nil { + return err + } + if err := cdcs.InstallationPending(r, cdc); err != nil { + return err + } + + cdc.Status.LastAppliedConfiguration = string(configJson) + secret.StringData["install-config.yaml"] = string(installConfig) + return nil +} + func (r *ReconcileClusterPool) createRandomNamespace(clp *hivev1.ClusterPool) (*corev1.Namespace, error) { namespaceName := apihelpers.GetResourceName(clp.Name, utilrand.String(5)) ns := &corev1.Namespace{ @@ -813,6 +961,16 @@ func (r *ReconcileClusterPool) reconcileDeletedPool(pool *hivev1.ClusterPool, lo if !controllerutils.HasFinalizer(pool, finalizer) { return nil } + + cdcs, err := getAllCustomizationsForPool(r.Client, pool, logger) + if err != nil { + return err + } + + if err := cdcs.RemoveFinalizer(r.Client, pool); err != nil { + return err + } + // Don't care about the poolVersion here since we're deleting everything. cds, err := getAllClusterDeploymentsForPool(r.Client, pool, "", logger) if err != nil { @@ -827,6 +985,7 @@ func (r *ReconcileClusterPool) reconcileDeletedPool(pool *hivev1.ClusterPool, lo return errors.Wrap(err, "could not delete ClusterDeployment") } } + // TODO: Wait to remove finalizer until all (unclaimed??) 
clusters are gone. controllerutils.DeleteFinalizer(pool, finalizer) if err := r.Update(context.Background(), pool); err != nil { @@ -1006,7 +1165,59 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg cloudBuilder.Region = platform.Azure.Region cloudBuilder.CloudName = platform.Azure.CloudName return cloudBuilder, nil - // TODO: OpenStack, VMware, and Ovirt. + case platform.OpenStack != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.OpenStack.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + cloudBuilder := clusterresource.NewOpenStackCloudBuilderFromSecret(credsSecret) + cloudBuilder.Cloud = platform.OpenStack.Cloud + return cloudBuilder, nil + case platform.VSphere != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.VSphere.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + + certsSecret, err := r.getCredentialsSecret(pool, platform.VSphere.CertificatesSecretRef.Name, logger) + if err != nil { + return nil, err + } + + if _, ok := certsSecret.Data[".cacert"]; !ok { + return nil, err + } + + cloudBuilder := clusterresource.NewVSphereCloudBuilderFromSecret(credsSecret, certsSecret) + cloudBuilder.Datacenter = platform.VSphere.Datacenter + cloudBuilder.DefaultDatastore = platform.VSphere.DefaultDatastore + cloudBuilder.VCenter = platform.VSphere.VCenter + cloudBuilder.Cluster = platform.VSphere.Cluster + cloudBuilder.Folder = platform.VSphere.Folder + cloudBuilder.Network = platform.VSphere.Network + + return cloudBuilder, nil + case platform.Ovirt != nil: + credsSecret, err := r.getCredentialsSecret(pool, platform.Ovirt.CredentialsSecretRef.Name, logger) + if err != nil { + return nil, err + } + + certsSecret, err := r.getCredentialsSecret(pool, platform.Ovirt.CertificatesSecretRef.Name, logger) + if err != nil { + return nil, err + } + + if _, ok := certsSecret.Data[".cacert"]; !ok { + return nil, err + } + + cloudBuilder := 
clusterresource.NewOvirtCloudBuilderFromSecret(credsSecret) + cloudBuilder.StorageDomainID = platform.Ovirt.StorageDomainID + cloudBuilder.ClusterID = platform.Ovirt.ClusterID + cloudBuilder.NetworkName = platform.Ovirt.NetworkName + + return cloudBuilder, nil default: logger.Info("unsupported platform") return nil, errors.New("unsupported platform") diff --git a/pkg/controller/clusterpool/clusterpool_controller_test.go b/pkg/controller/clusterpool/clusterpool_controller_test.go index 59ce0524613..9ef0c325a71 100644 --- a/pkg/controller/clusterpool/clusterpool_controller_test.go +++ b/pkg/controller/clusterpool/clusterpool_controller_test.go @@ -2,6 +2,9 @@ package clusterpool import ( "context" + "encoding/json" + "fmt" + "regexp" "sort" "testing" "time" @@ -22,12 +25,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/apis/hive/v1/aws" "github.com/openshift/hive/pkg/constants" controllerutils "github.com/openshift/hive/pkg/controller/utils" testclaim "github.com/openshift/hive/pkg/test/clusterclaim" testcd "github.com/openshift/hive/pkg/test/clusterdeployment" + testcdc "github.com/openshift/hive/pkg/test/clusterdeploymentcustomization" testcp "github.com/openshift/hive/pkg/test/clusterpool" testgeneric "github.com/openshift/hive/pkg/test/generic" testsecret "github.com/openshift/hive/pkg/test/secret" @@ -72,7 +77,23 @@ func TestReconcileClusterPool(t *testing.T) { Status: corev1.ConditionUnknown, Type: hivev1.ClusterPoolAllClustersCurrentCondition, }), + testcp.WithCondition(hivev1.ClusterPoolCondition{ + Status: corev1.ConditionUnknown, + Type: hivev1.ClusterPoolInventoryValidCondition, + }), ) + + inventoryPoolVersion := "e0bc44f74a546c63" + inventoryPoolBuilder := func() testcp.Builder { + return initializedPoolBuilder.Options( + 
testcp.WithInventory([]string{"test-cdc-1"}), + testcp.WithCondition(hivev1.ClusterPoolCondition{ + Status: corev1.ConditionUnknown, + Type: hivev1.ClusterPoolInventoryValidCondition, + }), + ) + } + cdBuilder := func(name string) testcd.Builder { return testcd.FullBuilder(name, name, scheme).Options( testcd.WithPowerState(hivev1.ClusterPowerStateHibernating), @@ -93,6 +114,7 @@ func TestReconcileClusterPool(t *testing.T) { noClusterImageSet bool noCredsSecret bool expectError bool + expectInventory bool expectedTotalClusters int expectedObservedSize int32 expectedObservedReady int32 @@ -101,17 +123,22 @@ func TestReconcileClusterPool(t *testing.T) { expectedMissingDependenciesStatus corev1.ConditionStatus expectedCapacityStatus corev1.ConditionStatus expectedCDCurrentStatus corev1.ConditionStatus + expectedInventoryValidStatus corev1.ConditionStatus + expectedInventoryMessage map[string][]string + expectedCDCReason map[string]string expectedMissingDependenciesMessage string expectedAssignedClaims int expectedUnassignedClaims int expectedAssignedCDs int + expectedAssignedCDCs map[string]string expectedRunning int expectedLabels map[string]string // Tested on all clusters, so will not work if your test has pre-existing cds in the pool. // Map, keyed by claim name, of expected Status.Conditions['Pending'].Reason. // (The clusterpool controller always sets this condition's Status to True.) // Not checked if nil. 
- expectedClaimPendingReasons map[string]string - expectPoolVersionChanged bool + expectedClaimPendingReasons map[string]string + expectedInventoryAssignmentOrder []string + expectPoolVersionChanged bool }{ { name: "initialize conditions", @@ -121,6 +148,7 @@ func TestReconcileClusterPool(t *testing.T) { expectedMissingDependenciesStatus: corev1.ConditionUnknown, expectedCapacityStatus: corev1.ConditionUnknown, expectedCDCurrentStatus: corev1.ConditionUnknown, + expectedInventoryValidStatus: corev1.ConditionUnknown, }, { name: "copyover fields", @@ -166,6 +194,258 @@ func TestReconcileClusterPool(t *testing.T) { }, expectPoolVersionChanged: true, }, + { + name: "cp with inventory and cdc exists is valid", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), + }, + expectedTotalClusters: 1, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectedInventoryValidStatus: corev1.ConditionTrue, + expectInventory: true, + expectedAssignedCDCs: map[string]string{"test-cdc-1": testLeasePoolName}, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonInstallationPending}, + }, + { + name: "cp with inventory and available cdc deleted without hold", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder( + testNamespace, "test-cdc-1", scheme, + ).GenericOptions(testgeneric.Deleted()).Build(), + }, + expectedTotalClusters: 0, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectInventory: true, + expectError: true, + expectedCDCurrentStatus: corev1.ConditionUnknown, + }, + { + name: "cp with inventory and available cdc with finalizer deleted without hold", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder( + testNamespace, "test-cdc-1", scheme, + ).GenericOptions( + testgeneric.Deleted(), + 
testgeneric.WithFinalizer(fmt.Sprintf("hive.openshift.io/%s", testLeasePoolName)), + ).Build(), + }, + expectedTotalClusters: 0, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectInventory: true, + expectError: true, + expectedCDCurrentStatus: corev1.ConditionUnknown, + }, + { + name: "cp with inventory and reserved cdc deletion on hold", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcd.FullBuilder(testNamespace, "c1", scheme).Build( + testcd.WithPoolVersion(inventoryPoolVersion), + testcd.WithClusterPoolReference(testNamespace, testLeasePoolName, "claim"), + testcd.WithCustomization("test-cdc-1"), + testcd.Running(), + ), + testcdc.FullBuilder( + testNamespace, "test-cdc-1", scheme, + ).GenericOptions( + testgeneric.Deleted(), + testgeneric.WithFinalizer(finalizer), + ).Build( + testcdc.WithPool(testLeasePoolName), + testcdc.WithCD("c1"), + testcdc.Reserved(), + ), + }, + expectedTotalClusters: 1, + expectedRunning: 1, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectInventory: true, + expectError: false, + expectedCDCurrentStatus: corev1.ConditionTrue, + expectedAssignedCDs: 1, + expectedUnassignedClaims: 0, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, + }, + { + name: "cp with inventory and cdc doesn't exist is not valid - missing", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + }, + expectedTotalClusters: 0, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectedInventoryValidStatus: corev1.ConditionFalse, + expectedInventoryMessage: map[string][]string{"Missing": {"test-cdc-1"}}, + expectedCDCurrentStatus: corev1.ConditionTrue, // huh? 
+ expectInventory: true, + expectError: false, + }, + { + name: "cp with inventory and cdc patch broken is not valid - BrokenBySyntax", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( + testcdc.WithPatch("/broken/path", "replace", "x"), + ), + }, + expectedTotalClusters: 0, + expectedObservedSize: 0, + expectedObservedReady: 0, + expectedInventoryValidStatus: corev1.ConditionFalse, + expectedInventoryMessage: map[string][]string{"BrokenBySyntax": {"test-cdc-1"}}, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonBrokenSyntax}, + expectInventory: true, + expectedCDCurrentStatus: corev1.ConditionUnknown, + expectError: true, + }, + { + name: "cp with inventory and cd provisioning failed is not valid - BrokenByCloud", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), + testcd.FullBuilder("c1", "c1", scheme).Build( + testcd.WithPoolVersion("e0bc44f74a546c63"), + testcd.WithUnclaimedClusterPoolReference(testNamespace, testLeasePoolName), + testcd.WithCustomization("test-cdc-1"), + testcd.Broken(), + ), + }, + expectedTotalClusters: 0, + expectedObservedSize: 1, + expectedObservedReady: 0, + expectedInventoryValidStatus: corev1.ConditionFalse, + expectedInventoryMessage: map[string][]string{"BrokenByCloud": {"test-cdc-1"}}, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonBrokenCloud}, + expectInventory: true, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, + }, + { + name: "cp with inventory and good cdc is valid, cd created", + existing: []runtime.Object{ + inventoryPoolBuilder().Build(testcp.WithSize(1)), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build(), + unclaimedCDBuilder("c1").Build( + testcd.WithCustomization("test-cdc-1"), + testcd.Running(), + ), + }, + 
expectedTotalClusters: 0, + expectedObservedSize: 1, + expectedObservedReady: 1, + expectedInventoryValidStatus: corev1.ConditionTrue, + expectedCDCReason: map[string]string{"test-cdc-1": hivev1.CustomizationApplyReasonSucceeded}, + expectInventory: true, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, + }, + { + name: "cp with inventory - correct prioritization - same status", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-successful-old", "test-cdc-unused-new"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-successful-old", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build(), + }, + expectedTotalClusters: 1, + expectedInventoryValidStatus: corev1.ConditionTrue, + expectInventory: true, + expectedInventoryAssignmentOrder: []string{"test-cdc-successful-old"}, + expectedAssignedCDCs: map[string]string{"test-cdc-successful-old": ""}, + }, + { + name: "cp with inventory - correct prioritization - mix and multiple deployments", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(2), + testcp.WithInventory([]string{"test-cdc-successful-old", "test-cdc-unused-new", "test-cdc-broken-old"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-successful-old", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonBrokenCloud, nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-unused-new", scheme).Build(), + }, + expectedTotalClusters: 2, + expectedInventoryValidStatus: corev1.ConditionFalse, + expectInventory: true, + expectedInventoryAssignmentOrder: []string{"test-cdc-successful-old", 
"test-cdc-unused-new"}, + expectedAssignedCDCs: map[string]string{ + "test-cdc-successful-old": "", + "test-cdc-unused-new": "", + }, + }, + + { + name: "cp with inventory - correct prioritization - successful vs broken", + existing: []runtime.Object{ + initializedPoolBuilder.Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-successful-new", "test-cdc-broken-old"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-broken-old", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonBrokenCloud, nowish.Add(-time.Hour)), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-successful-new", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish), + ), + }, + expectedTotalClusters: 1, + expectedInventoryValidStatus: corev1.ConditionFalse, + expectInventory: true, + expectedInventoryAssignmentOrder: []string{"test-cdc-successful-new"}, + expectedAssignedCDCs: map[string]string{"test-cdc-successful-new": ""}, + }, + { + name: "cp with inventory - release cdc when cd is missing", + existing: []runtime.Object{ + inventoryPoolBuilder().Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-1"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( + testcdc.WithApplySucceeded(hivev1.CustomizationApplyReasonSucceeded, nowish.Add(-time.Hour)), + testcdc.WithPool(testLeasePoolName), + testcdc.WithCD("c1"), + testcdc.Reserved(), + ), + }, + expectedTotalClusters: 1, + expectInventory: true, + expectedAssignedCDCs: map[string]string{"test-cdc-1": ""}, + }, + { + name: "cp with inventory - fix cdc when cd reference exists", + existing: []runtime.Object{ + inventoryPoolBuilder().Build( + testcp.WithSize(1), + testcp.WithInventory([]string{"test-cdc-1"}), + ), + testcdc.FullBuilder(testNamespace, "test-cdc-1", scheme).Build( + testcdc.Available(), + ), + testcd.FullBuilder("c1", "c1", scheme).Build( + testcd.WithUnclaimedClusterPoolReference(testNamespace, 
testLeasePoolName), + testcd.WithCustomization("test-cdc-1"), + ), + }, + expectedTotalClusters: 1, + expectedObservedSize: 1, + expectInventory: true, + expectedAssignedCDCs: map[string]string{"test-cdc-1": "c1"}, + expectedCDCurrentStatus: corev1.ConditionUnknown, + }, { // This also proves we only delete one stale cluster at a time name: "delete oldest stale cluster first", @@ -1417,6 +1697,11 @@ func TestReconcileClusterPool(t *testing.T) { Build(testsecret.WithDataKeyValue("dummykey", []byte("dummyval"))), ) } + expectedPoolVersion := initialPoolVersion + if test.expectInventory { + expectedPoolVersion = inventoryPoolVersion + } + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(test.existing...).Build() logger := log.New() logger.SetLevel(log.DebugLevel) @@ -1440,7 +1725,6 @@ func TestReconcileClusterPool(t *testing.T) { } else { assert.NoError(t, err, "expected no error from reconcile") } - pool := &hivev1.ClusterPool{} err = fakeClient.Get(context.Background(), client.ObjectKey{Namespace: testNamespace, Name: testLeasePoolName}, pool) @@ -1453,9 +1737,9 @@ func TestReconcileClusterPool(t *testing.T) { assert.Equal(t, test.expectedObservedReady, pool.Status.Ready, "unexpected observed ready count") currentPoolVersion := calculatePoolVersion(pool) assert.Equal( - t, test.expectPoolVersionChanged, currentPoolVersion != initialPoolVersion, + t, test.expectPoolVersionChanged, currentPoolVersion != expectedPoolVersion, "expectPoolVersionChanged is %t\ninitial %q\nfinal %q", - test.expectPoolVersionChanged, initialPoolVersion, currentPoolVersion) + test.expectPoolVersionChanged, expectedPoolVersion, currentPoolVersion) expectedCDCurrentStatus := test.expectedCDCurrentStatus if expectedCDCurrentStatus == "" { expectedCDCurrentStatus = corev1.ConditionTrue @@ -1486,6 +1770,32 @@ func TestReconcileClusterPool(t *testing.T) { } } + if test.expectedInventoryValidStatus != "" { + inventoryValidCondition := 
controllerutils.FindCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) + if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") { + assert.Equal(t, test.expectedInventoryValidStatus, inventoryValidCondition.Status, + "unexpcted InventoryValid condition status %s", inventoryValidCondition.Message) + } + } + + if test.expectedInventoryMessage != nil { + inventoryValidCondition := controllerutils.FindCondition(pool.Status.Conditions, hivev1.ClusterPoolInventoryValidCondition) + if assert.NotNil(t, inventoryValidCondition, "did not find InventoryValid condition") { + expectedInventoryMessage := map[string][]string{} + err := json.Unmarshal([]byte(inventoryValidCondition.Message), &expectedInventoryMessage) + if err != nil { + assert.Error(t, err, "unable to parse inventory condition message") + } + for key, value := range test.expectedInventoryMessage { + if val, ok := expectedInventoryMessage[key]; ok { + assert.ElementsMatch(t, value, val, "unexpected inventory message for %s: %s", key, inventoryValidCondition.Message) + } else { + assert.Fail(t, "expected inventory message to contain key %s: %s", key, inventoryValidCondition.Message) + } + } + } + } + cds := &hivev1.ClusterDeploymentList{} err = fakeClient.List(context.Background(), cds) require.NoError(t, err) @@ -1558,6 +1868,41 @@ func TestReconcileClusterPool(t *testing.T) { } assert.Equal(t, test.expectedAssignedClaims, actualAssignedClaims, "unexpected number of assigned claims") assert.Equal(t, test.expectedUnassignedClaims, actualUnassignedClaims, "unexpected number of unassigned claims") + + cdcs := &hivev1.ClusterDeploymentCustomizationList{} + err = fakeClient.List(context.Background(), cdcs) + require.NoError(t, err) + cdcMap := make(map[string]hivev1.ClusterDeploymentCustomization, len(cdcs.Items)) + for _, cdc := range cdcs.Items { + cdcMap[cdc.Name] = cdc + + condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, 
hivev1.ApplySucceededCondition) + if test.expectedCDCReason != nil { + if reason, ok := test.expectedCDCReason[cdc.Name]; ok { + assert.NotNil(t, condition) + assert.Equal(t, reason, condition.Reason, "expected CDC status to match") + } + } + + if test.expectedAssignedCDCs != nil { + if cdName, ok := test.expectedAssignedCDCs[cdc.Name]; ok { + assert.Regexp(t, regexp.MustCompile(cdName), cdc.Status.ClusterDeploymentRef.Name, "expected CDC assignment to CD match") + } + } + } + + if order := test.expectedInventoryAssignmentOrder; order != nil && len(order) > 0 { + lastTime := metav1.NewTime(nowish.Add(24 * -time.Hour)) + for _, cdcName := range order { + cdc := cdcMap[cdcName] + condition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if condition == nil || condition.Status == corev1.ConditionUnknown || condition.Status == corev1.ConditionTrue { + assert.Failf(t, "expected CDC %s to be assigned", cdcName) + } + assert.True(t, lastTime.Before(&condition.LastTransitionTime) || lastTime.Equal(&condition.LastTransitionTime), "expected %s to be before %s", lastTime, condition.LastTransitionTime) + lastTime = condition.LastTransitionTime + } + } }) } } diff --git a/pkg/controller/clusterpool/collections.go b/pkg/controller/clusterpool/collections.go index 64b4da6b297..ca55aef4ace 100644 --- a/pkg/controller/clusterpool/collections.go +++ b/pkg/controller/clusterpool/collections.go @@ -2,6 +2,7 @@ package clusterpool import ( "context" + "encoding/json" "errors" "fmt" "sort" @@ -15,9 +16,11 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/controller-runtime/pkg/client" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" "github.com/openshift/hive/pkg/constants" controllerutils "github.com/openshift/hive/pkg/controller/utils" + apierrors "k8s.io/apimachinery/pkg/api/errors" ) type claimCollection struct { @@ -207,6 +210,8 @@ type 
cdCollection struct { unknownPoolVersion []*hivev1.ClusterDeployment // Clusters whose pool version annotation doesn't match the pool's mismatchedPoolVersion []*hivev1.ClusterDeployment + // Cluster whose customization reference was removed from pool's inventory + customizationMissing []*hivev1.ClusterDeployment // All CDs in this pool byCDName map[string]*hivev1.ClusterDeployment // This contains only claimed CDs @@ -309,6 +314,17 @@ func getAllClusterDeploymentsForPool(c client.Client, pool *hivev1.ClusterPool, }).Error("unepectedly got a ClusterDeployment not belonging to this pool") continue } + customizationExists := true + if cd.Spec.ClusterPoolRef.CustomizationRef != nil { + customizationExists = false + cdcName := cd.Spec.ClusterPoolRef.CustomizationRef.Name + for _, entry := range pool.Spec.Inventory { + if cdcName == entry.Name { + customizationExists = true + break + } + } + } ref := &cdList.Items[i] cdCol.byCDName[cd.Name] = ref claimName := poolRef.ClaimName @@ -329,7 +345,7 @@ func getAllClusterDeploymentsForPool(c client.Client, pool *hivev1.ClusterPool, } else { cdCol.installing = append(cdCol.installing, ref) } - // Count stale CDs (poolVersion either unknown or mismatched) + // Count stale CDs (poolVersion either unknown or mismatched, or customizaiton was removed) if cdPoolVersion, ok := cd.Annotations[constants.ClusterDeploymentPoolSpecHashAnnotation]; !ok || cdPoolVersion == "" { // Annotation is either missing or empty. 
This could be due to upgrade (this CD was // created before this code was installed) or manual intervention (outside agent mucked @@ -337,8 +353,9 @@ func getAllClusterDeploymentsForPool(c client.Client, pool *hivev1.ClusterPool, cdCol.unknownPoolVersion = append(cdCol.unknownPoolVersion, ref) } else if cdPoolVersion != poolVersion { cdCol.mismatchedPoolVersion = append(cdCol.mismatchedPoolVersion, ref) + } else if cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef; cdcRef != nil && !customizationExists { + cdCol.customizationMissing = append(cdCol.customizationMissing, ref) } - } // Register all claimed CDs, even if they're deleting/marked if claimName != "" { @@ -463,6 +480,17 @@ func (cds *cdCollection) Unassigned(includeBroken bool) []*hivev1.ClusterDeploym return ret } +// Installed returns the list of ClusterDeployments which are Installed +func (cds *cdCollection) Installed() []*hivev1.ClusterDeployment { + ret := []*hivev1.ClusterDeployment{} + ret = append(ret, cds.assignable...) + ret = append(ret, cds.standby...) + for _, cd := range cds.byClaimName { + ret = append(ret, cd) + } + return ret +} + // UnknownPoolVersion returns the list of ClusterDeployments whose pool version annotation is // missing or empty. func (cds *cdCollection) UnknownPoolVersion() []*hivev1.ClusterDeployment { @@ -478,7 +506,9 @@ func (cds *cdCollection) MismatchedPoolVersion() []*hivev1.ClusterDeployment { // Stale returns the list of ClusterDeployments whose pool version annotation doesn't match the // version of the pool. Put "unknown" first becuase they're annoying. func (cds *cdCollection) Stale() []*hivev1.ClusterDeployment { - return append(cds.unknownPoolVersion, cds.mismatchedPoolVersion...) + stale := append(cds.unknownPoolVersion, cds.mismatchedPoolVersion...) + stale = append(stale, cds.customizationMissing...) + return stale } // RegisterNewCluster adds a freshly-created cluster to the cdCollection, assuming it is installing. 
@@ -584,6 +614,510 @@ func (cds *cdCollection) Delete(c client.Client, cdName string) error { return nil } +type cdcCollection struct { + // Unclaimed by any cluster pool CD and are not broken + unassigned []*hivev1.ClusterDeploymentCustomization + // Missing CDC means listed in pool inventory but the custom resource doesn't exist in the pool namespace + missing []string + // Used by some cluster deployment + reserved map[string]*hivev1.ClusterDeploymentCustomization + // Last Cluster Deployment failed on provision + cloud map[string]*hivev1.ClusterDeploymentCustomization + // Failed to apply patches for this cluster pool + syntax map[string]*hivev1.ClusterDeploymentCustomization + // ByCDCName are all the CDCs listed in the pool inventory, the CR exists and are mapped by name + byCDCName map[string]*hivev1.ClusterDeploymentCustomization + // Namespace are all the CDC in the namespace mapped by name + namespace map[string]*hivev1.ClusterDeploymentCustomization +} + +// getAllCustomizationsForPool is the constructor for a cdcCollection for all of the +// ClusterDeploymentCustomizations that are related to specified pool. 
+func getAllCustomizationsForPool(c client.Client, pool *hivev1.ClusterPool, logger log.FieldLogger) (*cdcCollection, error) { + if pool.Spec.Inventory == nil { + return &cdcCollection{}, nil + } + cdcList := &hivev1.ClusterDeploymentCustomizationList{} + if err := c.List( + context.Background(), cdcList, + client.InNamespace(pool.Namespace)); err != nil { + logger.WithField("namespace", pool.Namespace).WithError(err).Error("error listing ClusterDeploymentCustomizations") + return &cdcCollection{}, err + } + + cdcCol := cdcCollection{ + unassigned: make([]*hivev1.ClusterDeploymentCustomization, 0), + missing: make([]string, 0), + reserved: make(map[string]*hivev1.ClusterDeploymentCustomization), + cloud: make(map[string]*hivev1.ClusterDeploymentCustomization), + syntax: make(map[string]*hivev1.ClusterDeploymentCustomization), + byCDCName: make(map[string]*hivev1.ClusterDeploymentCustomization), + namespace: make(map[string]*hivev1.ClusterDeploymentCustomization), + } + + for i, cdc := range cdcList.Items { + cdcCol.namespace[cdc.Name] = &cdcList.Items[i] + } + + for _, item := range pool.Spec.Inventory { + if cdc, ok := cdcCol.namespace[item.Name]; ok { + cdcCol.byCDCName[item.Name] = cdc + availability := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if availability != nil && availability.Status == corev1.ConditionFalse { + cdcCol.reserved[item.Name] = cdc + } else { + cdcCol.unassigned = append(cdcCol.unassigned, cdc) + } + applyStatus := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) + if applyStatus == nil { + continue + } + if applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenCloud { + cdcCol.cloud[item.Name] = cdc + } + if applyStatus.Reason == hivev1.CustomizationApplyReasonBrokenSyntax { + cdcCol.syntax[item.Name] = cdc + } + } else { + cdcCol.missing = append(cdcCol.missing, item.Name) + } + } + + cdcCol.Sort() + + logger.WithFields(log.Fields{ + 
"unassignedCount": len(cdcCol.unassigned), + "missingCount": len(cdcCol.missing), + "reservedCount": len(cdcCol.reserved), + "brokenByCloudCount": len(cdcCol.cloud), + "brokenBySyntaxCount": len(cdcCol.syntax), + }).Debug("found ClusterDeploymentCustomizations for ClusterPool") + + return &cdcCol, nil +} + +// Sort unassigned oldest successful customizations to avoid using the same broken +// customization. When customizations have the same last apply status, the +// oldest used customization will be prioritized. +func (cdcs *cdcCollection) Sort() { + sort.Slice( + cdcs.unassigned, + func(i, j int) bool { + now := metav1.NewTime(time.Now()) + iStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[i].Status.Conditions, hivev1.ApplySucceededCondition) + jStatus := conditionsv1.FindStatusCondition(cdcs.unassigned[j].Status.Conditions, hivev1.ApplySucceededCondition) + iName := cdcs.unassigned[i].Name + jName := cdcs.unassigned[j].Name + if iStatus == nil || iStatus.Status == corev1.ConditionUnknown { + iStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} + iStatus.LastTransitionTime = now + } + if jStatus == nil || jStatus.Status == corev1.ConditionUnknown { + jStatus = &conditionsv1.Condition{Reason: hivev1.CustomizationApplyReasonSucceeded} + jStatus.LastTransitionTime = now + } + iTime := iStatus.LastTransitionTime + jTime := jStatus.LastTransitionTime + if iStatus.Reason == jStatus.Reason { + if iTime.Equal(&jTime) { + // Sort by name to make this deterministic + return iName < jName + } + return iTime.Before(&jTime) + } + if iStatus.Reason == hivev1.CustomizationApplyReasonSucceeded { + return true + } + if jStatus.Reason == hivev1.CustomizationApplyReasonSucceeded { + return false + } + return iName < jName + }, + ) +} + +func (cdcs *cdcCollection) Reserve(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, cdName, poolName string) error { + if cdc.Status.ClusterDeploymentRef != nil || cdc.Status.ClusterPoolRef != nil 
{ + return errors.New("ClusterDeploymentCustomization already reserved") + } + + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "reserved", + }) + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } else { + return errors.New("ClusterDeploymentCustomization already reserved") + } + + cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: cdName} + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: poolName} + + cdcs.reserved[cdc.Name] = cdc + cdcs.byCDCName[cdc.Name] = cdc + + for i, cdci := range cdcs.unassigned { + if cdci.Name == cdc.Name { + copy(cdcs.unassigned[i:], cdcs.unassigned[i+1:]) + cdcs.unassigned = cdcs.unassigned[:len(cdcs.unassigned)-1] + break + } + } + + cdcs.Sort() + return nil +} + +func (cdcs *cdcCollection) Unassign(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "available", + }) + + if cdc.Status.ClusterDeploymentRef != nil || cdc.Status.ClusterPoolRef != nil { + cdc.Status.ClusterDeploymentRef = nil + cdc.Status.ClusterPoolRef = nil + changed = true + } + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } + + delete(cdcs.reserved, cdc.Name) + + cdcs.unassigned = append(cdcs.unassigned, cdc) + cdcs.Sort() + return nil +} + +func (cdcs *cdcCollection) BrokenBySyntax(c client.Client, cdc *hivev1.ClusterDeploymentCustomization, msg string) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: corev1.ConditionFalse, + 
Reason: hivev1.CustomizationApplyReasonBrokenSyntax, + Message: msg, + }) + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } + + cdcs.syntax[cdc.Name] = cdc + delete(cdcs.cloud, cdc.Name) + return nil +} + +func (cdcs *cdcCollection) BrokenByCloud(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: corev1.ConditionFalse, + Reason: hivev1.CustomizationApplyReasonBrokenCloud, + Message: "Cluster installation failed. This may or may not be the fault of patches. Check the installation logs.", + }) + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } + + cdcs.cloud[cdc.Name] = cdc + delete(cdcs.syntax, cdc.Name) + return nil +} + +func (cdcs *cdcCollection) Succeeded(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: corev1.ConditionTrue, + Reason: hivev1.CustomizationApplyReasonSucceeded, + Message: "Patches applied and cluster installed successfully", + }) + + if changed { + if err := c.Status().Update(context.Background(), cdc); err != nil { + return err + } + } + + delete(cdcs.syntax, cdc.Name) + delete(cdcs.cloud, cdc.Name) + return nil +} + +func (cdcs *cdcCollection) InstallationPending(c client.Client, cdc *hivev1.ClusterDeploymentCustomization) error { + changed := conditionsv1.SetStatusConditionNoHeartbeat(&cdc.Status.Conditions, conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: corev1.ConditionFalse, + Reason: hivev1.CustomizationApplyReasonInstallationPending, + Message: "Patches applied; cluster is installing", + }) + + if changed { + if err := c.Status().Update(context.Background(), 
cdc); err != nil { + return err + } + } + + delete(cdcs.syntax, cdc.Name) + delete(cdcs.cloud, cdc.Name) + return nil +} + +func (cdcs *cdcCollection) Unassigned() []*hivev1.ClusterDeploymentCustomization { + return cdcs.unassigned +} + +func (cdcs *cdcCollection) RemoveFinalizer(c client.Client, pool *hivev1.ClusterPool) error { + poolFinalizer := fmt.Sprintf("hive.openshift.io/%s", pool.Name) + + for _, item := range pool.Spec.Inventory { + if cdc, ok := cdcs.namespace[item.Name]; ok { + controllerutils.DeleteFinalizer(cdc, poolFinalizer) + if err := c.Update(context.Background(), cdc); err != nil { + return err + } + cdcs.namespace[item.Name] = cdc + } + } + + return nil +} + +// SyncClusterDeploymentCustomizations updates CDCs and related CR status: +// - Handle deletion of CDC in the namespace +// - If there is no CD, but CDC is reserved, then we release the CDC +// - Make sure that CD <=> CDC links are legit; repair them if not. +// - Notice a Broken CD => update the CDC's ApplySucceeded condition to BrokenByCloud; +// - Notice a CD has finished installing => update the CDC's ApplySucceeded condition to Success; +// - Update ClusterPool InventoryValid condition +func (cdcs *cdcCollection) SyncClusterDeploymentCustomizationAssignments(c client.Client, pool *hivev1.ClusterPool, cds *cdCollection, logger log.FieldLogger) error { + if pool.Spec.Inventory == nil { + return nil + } + + poolFinalizer := fmt.Sprintf("hive.openshift.io/%s", pool.Name) + + // Handle deletion of CDC in the namespace + for _, cdc := range cdcs.namespace { + isDeleted := cdc.DeletionTimestamp != nil + hasFinalizer := controllerutils.HasFinalizer(cdc, poolFinalizer) + isAvailable := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if isDeleted && (isAvailable == nil || isAvailable.Status != corev1.ConditionFalse) { + // We can delete the finalizer for a deleted CDC only if it is not reserved + if hasFinalizer { + 
controllerutils.DeleteFinalizer(cdc, poolFinalizer) + if err := c.Update(context.Background(), cdc); err != nil { + return err + } + } + } else { + // Ensure the finalizer is present if the CDC is not deleted, OR if it is reserved + if !hasFinalizer { + controllerutils.AddFinalizer(cdc, poolFinalizer) + if err := c.Update(context.Background(), cdc); err != nil { + return err + } + } + } + } + + // If there is no CD, but CDC is reserved, then we release the CDC + for _, cdc := range cdcs.reserved { + if ref := cdc.Status.ClusterDeploymentRef; ref == nil || cds.ByName(ref.Name) == nil { + if err := cdcs.Unassign(c, cdc); err != nil { + return err + } + } + } + + // Make sure CD <=> CDC links are legit; repair them if not. + for _, cd := range cds.byCDName { + // CD has CDC + cpRef := cd.Spec.ClusterPoolRef + if cpRef.CustomizationRef == nil { + continue + } + + logger = logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cpRef.CustomizationRef.Name, + "namespace": cpRef.Namespace, + }) + + // CDC exists + cdc, ok := cdcs.namespace[cpRef.CustomizationRef.Name] + if !ok { + logger.Warning("CD has reference to a CDC that doesn't exist, it was forcefully removed or this is a bug") + continue + } + + // CDC is not reserved + available := conditionsv1.FindStatusCondition(cdc.Status.Conditions, conditionsv1.ConditionAvailable) + if available == nil || available.Status == corev1.ConditionUnknown || available.Status == corev1.ConditionTrue { + // CDC is used by other CD + if cdRef := cdc.Status.ClusterDeploymentRef; cdRef != nil && cdRef.Name != cd.Name { + cdOther := &hivev1.ClusterDeployment{} + if err := c.Get( + context.Background(), + client.ObjectKey{Name: cdRef.Name, Namespace: cdRef.Name}, + cdOther, + ); err != nil { + if !apierrors.IsNotFound(err) { + return err + } + } else { + // Fixing reservation should be done by the appropriate cluster pool + logger.WithFields(log.Fields{ + "parallelclusterdeployment": 
cdOther.Name, + "namespace": cdc.Namespace, + }).Warning("Another CD exists and has this CDC reserved") + continue + } + } + // Fix CDC availability + if err := cdcs.Unassign(c, cdc); err != nil { + return err + } + if err := cdcs.Reserve(c, cdc, cd.Name, pool.Name); err != nil { + return err + } + + if cd.Spec.Installed { + cdcs.Succeeded(c, cdc) + } else if isBroken(cd, pool, logger) { + cdcs.BrokenByCloud(c, cdc) + } else { + cdcs.InstallationPending(c, cdc) + } + } + } + + // Notice a Broken CD => update the CDC's ApplySucceeded condition to BrokenByCloud; + for _, cd := range cds.Broken() { + cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef + if cdcRef == nil { + continue + } + if cdc, ok := cdcs.byCDCName[cdcRef.Name]; ok { + if err := cdcs.BrokenByCloud(c, cdc); err != nil { + return err + } + } else { + logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cdcRef.Name, + "namespace": cd.Spec.ClusterPoolRef.Namespace, + }).Warning("CD has reference to a CDC that doesn't exist, it was forcefully removed or this is a bug") + } + } + + // Notice a CD has finished installing => update the CDC's ApplySucceeded condition to Success; + for _, cd := range cds.Installed() { + cdcRef := cd.Spec.ClusterPoolRef.CustomizationRef + if cdcRef == nil { + continue + } + if cdc, ok := cdcs.byCDCName[cdcRef.Name]; ok { + if err := cdcs.Succeeded(c, cdc); err != nil { + return err + } + } else { + logger.WithFields(log.Fields{ + "clusterdeployment": cd.Name, + "clusterdeploymentcustomization": cdcRef.Name, + "namespace": cd.Spec.ClusterPoolRef.Namespace, + }).Warning("CD has reference to a CDC that doesn't exist, it was forcefully removed or this is a bug") + } + } + + cdcs.Sort() + + // Update ClusterPool InventoryValid condition + if err := cdcs.UpdateInventoryValidCondition(c, pool); err != nil { + return err + } + + return nil +} + +func (cdcs *cdcCollection) UpdateInventoryValidCondition(c client.Client, pool 
*hivev1.ClusterPool) error { + message := "" + status := corev1.ConditionTrue + reason := hivev1.InventoryReasonValid + if (len(cdcs.syntax) + len(cdcs.cloud) + len(cdcs.missing)) > 0 { + // Send the cdcCollection to our custom marshaller that extracts and marshals just the invalid CDCs. + var b invalidCDCCollection = invalidCDCCollection(*cdcs) + messageByte, err := json.Marshal(&b) + if err != nil { + return err + } + message = string(messageByte) + status = corev1.ConditionFalse + reason = hivev1.InventoryReasonInvalid + } + + conditions, changed := controllerutils.SetClusterPoolConditionWithChangeCheck( + pool.Status.Conditions, + hivev1.ClusterPoolInventoryValidCondition, + status, + reason, + message, + controllerutils.UpdateConditionIfReasonOrMessageChange, + ) + + if changed { + pool.Status.Conditions = conditions + if err := c.Status().Update(context.TODO(), pool); err != nil { + return err + } + } + + return nil +} + +type invalidCDCCollection cdcCollection + +var _ json.Marshaler = &invalidCDCCollection{} + +// MarshalJSON cdcs implements the InventoryValid condition message +func (cdcs *invalidCDCCollection) MarshalJSON() ([]byte, error) { + cloud := []string{} + for _, cdc := range cdcs.cloud { + cloud = append(cloud, cdc.Name) + } + syntax := []string{} + for _, cdc := range cdcs.syntax { + syntax = append(syntax, cdc.Name) + } + sort.Strings(cloud) + sort.Strings(syntax) + sort.Strings(cdcs.missing) + + return json.Marshal(&struct { + BrokenByCloud []string + BrokenBySyntax []string + Missing []string + }{ + BrokenByCloud: cloud, + BrokenBySyntax: syntax, + Missing: cdcs.missing, + }) +} + // setCDsCurrentCondition idempotently sets the ClusterDeploymentsCurrent condition on the // ClusterPool according to whether all unassigned CDs have the same PoolVersion as the pool. 
func setCDsCurrentCondition(c client.Client, cds *cdCollection, clp *hivev1.ClusterPool, poolVersion string) error { diff --git a/pkg/operator/assets/bindata.go b/pkg/operator/assets/bindata.go index e22ae4d68b5..cbbc1187782 100644 --- a/pkg/operator/assets/bindata.go +++ b/pkg/operator/assets/bindata.go @@ -1125,6 +1125,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates @@ -1139,6 +1140,7 @@ rules: - hiveconfigs - selectorsyncsets - selectorsyncidentityproviders + - clusterdeploymentcustomizations verbs: - get - list @@ -1508,6 +1510,7 @@ rules: - syncsets - syncsetinstances - clusterdeprovisions + - clusterdeploymentcustomizations # TODO: remove once v1alpha1 compat removed - clusterdeprovisionrequests - clusterstates diff --git a/pkg/test/assert/assertions.go b/pkg/test/assert/assertions.go index 3c2432fac3a..c6391e4e8ca 100644 --- a/pkg/test/assert/assertions.go +++ b/pkg/test/assert/assertions.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" ) @@ -55,6 +56,15 @@ func findClusterDeploymentCondition(conditions []hivev1.ClusterDeploymentConditi return nil } +func findCDCCondition(conditions []conditionsv1.Condition, conditionType conditionsv1.ConditionType) *conditionsv1.Condition { + for i, condition := range conditions { + if condition.Type == conditionType { + return &conditions[i] + } + } + return nil +} + // AssertConditionStatus asserts if a condition is present on the cluster deployment and has the expected status func AssertConditionStatus(t *testing.T, cd *hivev1.ClusterDeployment, condType hivev1.ClusterDeploymentConditionType, status corev1.ConditionStatus) { condition := findClusterDeploymentCondition(cd.Status.Conditions, condType) @@ 
-80,6 +90,23 @@ func AssertConditions(t *testing.T, cd *hivev1.ClusterDeployment, expectedCondit } } +// AssertCDCConditions asserts that the expected conditions are present on the cluster deployment customization. +// It also asserts if those conditions have the expected status, reason, and (optionally) message. +func AssertCDCConditions(t *testing.T, cdc *hivev1.ClusterDeploymentCustomization, expectedConditions []conditionsv1.Condition) { + testifyassert.LessOrEqual(t, len(expectedConditions), len(cdc.Status.Conditions), "some conditions are not present") + for _, expectedCond := range expectedConditions { + condition := findCDCCondition(cdc.Status.Conditions, expectedCond.Type) + if testifyassert.NotNilf(t, condition, "did not find expected condition type: %v", expectedCond.Type) { + testifyassert.Equal(t, expectedCond.Status, condition.Status, "condition found with unexpected status") + testifyassert.Equal(t, expectedCond.Reason, condition.Reason, "condition found with unexpected reason") + // Optionally validate the message + if expectedCond.Message != "" { + testifyassert.Equal(t, expectedCond.Message, condition.Message, "condition found with unexpected message") + } + } + } +} + // AssertEqualWhereItCounts compares two runtime.Objects, ignoring their ResourceVersion and TypeMeta, asserting that they // are otherwise equal. 
// This and cleanRVAndTypeMeta were borrowed/adapted from: diff --git a/pkg/test/clusterdeployment/clusterdeployment.go b/pkg/test/clusterdeployment/clusterdeployment.go index fc270e59730..3a2cc2215f1 100644 --- a/pkg/test/clusterdeployment/clusterdeployment.go +++ b/pkg/test/clusterdeployment/clusterdeployment.go @@ -3,6 +3,7 @@ package clusterdeployment import ( "time" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -243,3 +244,9 @@ func WithClusterMetadata(clusterMetadata *hivev1.ClusterMetadata) Option { clusterDeployment.Spec.ClusterMetadata = clusterMetadata } } + +func WithCustomization(cdcName string) Option { + return func(clusterDeployment *hivev1.ClusterDeployment) { + clusterDeployment.Spec.ClusterPoolRef.CustomizationRef = &corev1.LocalObjectReference{Name: cdcName} + } +} diff --git a/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go new file mode 100644 index 00000000000..1fe66ad96d2 --- /dev/null +++ b/pkg/test/clusterdeploymentcustomization/clusterdeploymentcustomization.go @@ -0,0 +1,150 @@ +package clusterdeploymentcustomization + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" + + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + hivev1 "github.com/openshift/hive/apis/hive/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/hive/pkg/test/generic" +) + +// Option defines a function signature for any function that wants to be passed into Build +type Option func(*hivev1.ClusterDeploymentCustomization) + +// Build runs each of the functions passed in to generate the object. 
+func Build(opts ...Option) *hivev1.ClusterDeploymentCustomization { + retval := &hivev1.ClusterDeploymentCustomization{} + for _, o := range opts { + o(retval) + } + + return retval +} + +type Builder interface { + Build(opts ...Option) *hivev1.ClusterDeploymentCustomization + + Options(opts ...Option) Builder + + GenericOptions(opts ...generic.Option) Builder +} + +func BasicBuilder() Builder { + return &builder{} +} + +func FullBuilder(namespace, name string, typer runtime.ObjectTyper) Builder { + b := &builder{} + return b.GenericOptions( + generic.WithTypeMeta(typer), + generic.WithResourceVersion("1"), + generic.WithNamespace(namespace), + generic.WithName(name), + ) +} + +type builder struct { + options []Option +} + +func (b *builder) Build(opts ...Option) *hivev1.ClusterDeploymentCustomization { + return Build(append(b.options, opts...)...) +} + +func (b *builder) Options(opts ...Option) Builder { + return &builder{ + options: append(b.options, opts...), + } +} + +func (b *builder) GenericOptions(opts ...generic.Option) Builder { + options := make([]Option, len(opts)) + for i, o := range opts { + options[i] = Generic(o) + } + return b.Options(options...) 
+} + +// Generic allows common functions applicable to all objects to be used as Options to Build +func Generic(opt generic.Option) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + opt(cdc) + } +} + +func Available() Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.Conditions = append(cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "available", + }) + } +} + +func Reserved() Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.Conditions = append(cdc.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Reserved", + Message: "reserved", + }) + } +} + +func WithPatch(path, op, value string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Spec.InstallConfigPatches = append(cdc.Spec.InstallConfigPatches, hivev1.PatchEntity{ + Path: path, + Op: op, + Value: value, + }) + } +} + +func WithApplySucceeded(reason string, change time.Time) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + status := corev1.ConditionTrue + if reason != hivev1.CustomizationApplyReasonSucceeded { + status = corev1.ConditionFalse + } + + if cdc.Status.Conditions == nil { + cdc.Status.Conditions = []conditionsv1.Condition{} + } + existingCondition := conditionsv1.FindStatusCondition(cdc.Status.Conditions, hivev1.ApplySucceededCondition) + if existingCondition == nil { + newCondition := conditionsv1.Condition{ + Type: hivev1.ApplySucceededCondition, + Status: status, + Reason: reason, + Message: reason, + } + newCondition.LastTransitionTime = metav1.NewTime(change) + cdc.Status.Conditions = append(cdc.Status.Conditions, newCondition) + } else { + existingCondition.LastTransitionTime = metav1.NewTime(change) + existingCondition.Status = status + existingCondition.Reason = reason + } + } +} + 
+func WithPool(name string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.ClusterPoolRef = &corev1.LocalObjectReference{Name: name} + } +} + +func WithCD(name string) Option { + return func(cdc *hivev1.ClusterDeploymentCustomization) { + cdc.Status.ClusterDeploymentRef = &corev1.LocalObjectReference{Name: name} + } +} diff --git a/pkg/test/clusterpool/clusterpool.go b/pkg/test/clusterpool/clusterpool.go index 8d156a4ce55..c2f481f80ff 100644 --- a/pkg/test/clusterpool/clusterpool.go +++ b/pkg/test/clusterpool/clusterpool.go @@ -184,3 +184,17 @@ func WithRunningCount(size int) Option { clusterPool.Spec.RunningCount = int32(size) } } + +func WithInventory(cdcs []string) Option { + return func(clusterPool *hivev1.ClusterPool) { + if len(cdcs) == 0 { + clusterPool.Spec.Inventory = nil + } else { + inventory := []hivev1.InventoryEntry{} + for _, cdc := range cdcs { + inventory = append(inventory, hivev1.InventoryEntry{Name: cdc}) + } + clusterPool.Spec.Inventory = inventory + } + } +} diff --git a/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go new file mode 100644 index 00000000000..2f2d31da7eb --- /dev/null +++ b/pkg/validating-webhooks/hive/v1/clusterdeploymentcustomization_validating_admission_hook.go @@ -0,0 +1,286 @@ +package v1 + +import ( + "fmt" + "net/http" + "strings" + + yamlpatch "github.com/krishicks/yaml-patch" + log "github.com/sirupsen/logrus" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hivev1 "github.com/openshift/hive/apis/hive/v1" +) + +const ( + 
clusterDeploymentCustomizationGroup = "hive.openshift.io" + clusterDeploymentCustomizationVersion = "v1" + clusterDeploymentCustomizationResource = "clusterdeploymentcustomizations" + + clusterDeploymentCustomizationAdmissionGroup = "admission.hive.openshift.io" + clusterDeploymentCustomizationAdmissionVersion = "v1" +) + +// ClusterDeploymentCustomizationValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. +type ClusterDeploymentCustomizationValidatingAdmissionHook struct { + decoder *admission.Decoder +} + +// NewClusterDeploymentCustomizationValidatingAdmissionHook constructs a new ClusterDeploymentCustomizationValidatingAdmissionHook +func NewClusterDeploymentCustomizationValidatingAdmissionHook(decoder *admission.Decoder) *ClusterDeploymentCustomizationValidatingAdmissionHook { + return &ClusterDeploymentCustomizationValidatingAdmissionHook{ + decoder: decoder, + } +} + +// ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the +// webhook is accessed by the kube apiserver. +// For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterdeploymentcustomizationvalidators". +// When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { + log.WithFields(log.Fields{ + "group": clusterDeploymentCustomizationAdmissionGroup, + "version": clusterDeploymentCustomizationAdmissionVersion, + "resource": "clusterdeploymentcustomizationvalidator", + }).Info("Registering validation REST resource") + + // NOTE: This GVR is meant to be different than the ClusterDeploymentCustomization CRD GVR which has group "hive.openshift.io". 
+ return schema.GroupVersionResource{ + Group: clusterDeploymentCustomizationAdmissionGroup, + Version: clusterDeploymentCustomizationAdmissionVersion, + Resource: "clusterdeploymentcustomizationvalidators", + }, + "clusterdeploymentcustomizationvalidator" +} + +// Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { + log.WithFields(log.Fields{ + "group": clusterDeploymentCustomizationAdmissionGroup, + "version": clusterDeploymentCustomizationAdmissionVersion, + "resource": "clusterdeploymentcustomizationvalidator", + }).Info("Initializing validation REST resource") + return nil // No initialization needed right now. +} + +// Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. +// Usually it's the kube apiserver that is making the admission validation request. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "Validate", + }) + + if !a.shouldValidate(admissionSpec) { + contextLogger.Info("Skipping validation for request") + // The request object isn't something that this validator should validate. + // Therefore, we say that it's Allowed. 
+ return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + + contextLogger.Info("Validating request") + + switch admissionSpec.Operation { + case admissionv1beta1.Create: + return a.validateCreate(admissionSpec) + case admissionv1beta1.Update: + return a.validateUpdate(admissionSpec) + default: + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } +} + +// shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check +// the validity of some other type of object with a different GVR. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "shouldValidate", + }) + + if admissionSpec.Resource.Group != clusterDeploymentCustomizationGroup { + contextLogger.Info("Returning False, not our group") + return false + } + + if admissionSpec.Resource.Version != clusterDeploymentCustomizationVersion { + contextLogger.Info("Returning False, it's our group, but not the right version") + return false + } + + if admissionSpec.Resource.Resource != clusterDeploymentCustomizationResource { + contextLogger.Info("Returning False, it's our group and version, but not the right resource") + return false + } + + // If we get here, then we're supposed to validate the object. + contextLogger.Debug("Returning True, passed all prerequisites.") + return true +} + +// validateCreate specifically validates create operations for ClusterDeploymentCustomization objects. 
+func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateCreate", + }) + + cdc := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, cdc); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = cdc.Name + + // TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd) + + if len(cdc.Name) > validation.DNS1123LabelMaxLength { + message := fmt.Sprintf("Invalid cluster deployment customization name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) + contextLogger.Error(message) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: message, + }, + } + } + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateInstallConfigPatches(specPath.Child("installConfigPatches"), cdc.Spec.InstallConfigPatches)...) + + if len(allErrs) > 0 { + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. 
+ contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +// validateUpdate specifically validates update operations for ClusterDeploymentCustomization objects. +func (a *ClusterDeploymentCustomizationValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + contextLogger := log.WithFields(log.Fields{ + "operation": admissionSpec.Operation, + "group": admissionSpec.Resource.Group, + "version": admissionSpec.Resource.Version, + "resource": admissionSpec.Resource.Resource, + "method": "validateUpdate", + }) + + newObject := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.Object, newObject); err != nil { + contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["object.Name"] = newObject.Name + + oldObject := &hivev1.ClusterDeploymentCustomization{} + if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { + contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, + Message: err.Error(), + }, + } + } + + // Add the new data to the contextLogger + contextLogger.Data["oldObject.Name"] = oldObject.Name + + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateInstallConfigPatches(specPath.Child("installConfigPatches"), newObject.Spec.InstallConfigPatches)...) 
+ + if len(allErrs) > 0 { + contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") + status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() + return &admissionv1beta1.AdmissionResponse{ + Allowed: false, + Result: &status, + } + } + + // If we get here, then all checks passed, so the object is valid. + contextLogger.Info("Successful validation") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } +} + +func validateInstallConfigPatches(path *field.Path, patches []hivev1.PatchEntity) field.ErrorList { + allErrs := field.ErrorList{} + + for i, patch := range patches { + if !isValidOP(yamlpatch.Op(patch.Op)) { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch op must be a valid json patch operation")) + } + if len(patch.Path) == 0 || !strings.HasPrefix(patch.Path, "/") { + allErrs = append(allErrs, field.Invalid(path.Index(i), patch, "install config patch path must start with '/'")) + } + } + return allErrs +} + +func isValidOP(op yamlpatch.Op) bool { + switch op { + case + yamlpatch.OpAdd, + yamlpatch.OpRemove, + yamlpatch.OpMove, + yamlpatch.OpCopy, + yamlpatch.OpTest, + yamlpatch.OpReplace: + return true + } + return false +} diff --git a/vendor/github.com/krishicks/yaml-patch/README.md b/vendor/github.com/krishicks/yaml-patch/README.md index a782e6397c9..8cc7b4b2b64 100644 --- a/vendor/github.com/krishicks/yaml-patch/README.md +++ b/vendor/github.com/krishicks/yaml-patch/README.md @@ -1,9 +1,79 @@ # yaml-patch +### **Note: This repo is for all intents and purposes abandoned. I would suggest using [ytt](https://get-ytt.io/) instead!** + `yaml-patch` is a version of Evan Phoenix's [json-patch](https://github.com/evanphx/json-patch), which is an implementation of [JavaScript Object Notation (JSON) Patch](https://tools.ietf.org/html/rfc6902), -but for YAML. +directly transposed to YAML. 
+ + +## Syntax + +General syntax is the following: + +```yaml +- op: + from: # only valid for the 'move' and 'copy' operations + path: # always mandatory + value: # only valid for 'add', 'replace' and 'test' operations +``` + +### Paths + +Supported YAML path are primarily those of +[RFC 6901 JSON Pointers](https://tools.ietf.org/html/rfc6901). + +A syntax extention with `=` was added to match any sub-element in a YAML +structure by key/value. + +For example, the following removes all sub-nodes of the `releases` array that +have a `name` key with a value of `cassandra`: + +```yaml +- op: remove + path: /releases/name=cassandra +``` + +A major caveat with `=`, is that it actually performs a _recursive_ search for +matching nodes. The root node at which the recursive search is initiated, is +the node matched by the path prefix before `=`. + +The second caveat is that the recursion stops at a matching node. With the +`add` operation, you could expect sub-nodes of matching nodes to also match, +but they don't. + +If your document is the following and you apply the patch above, then all +sub-nodes of `/releases` that match `name=cassandra` will be removed. + +```yaml +releases: # a recursive search is made, starting from this node + - name: cassandra # does match, will be removed + - - name: toto + - name: cassandra # does match, will be removed! + sub: + - name: cassandra # not matched: the recursion stops at matching parent node + - super: + sub: + name: cassandra # does match, will be removed! +``` + +#### Path Escaping + +As in RFC 6901, escape sequences are introduced by `~`. So, `~` is escaped +`~0`, `/` is escaped `~1`. There is no escape for `=` yet. + + +### Operations + +Supported patch operations are those of [RFC 6902](https://tools.ietf.org/html/rfc6902). 
+ +- [`add`](https://tools.ietf.org/html/rfc6902#section-4.1) +- [`remove`](https://tools.ietf.org/html/rfc6902#section-4.2) +- [`replace`](https://tools.ietf.org/html/rfc6902#section-4.3) +- [`move`](https://tools.ietf.org/html/rfc6902#section-4.4) +- [`copy`](https://tools.ietf.org/html/rfc6902#section-4.5) +- [`test`](https://tools.ietf.org/html/rfc6902#section-4.6) ## Installing diff --git a/vendor/github.com/krishicks/yaml-patch/container.go b/vendor/github.com/krishicks/yaml-patch/container.go index bdc22f143de..4a5b275aea9 100644 --- a/vendor/github.com/krishicks/yaml-patch/container.go +++ b/vendor/github.com/krishicks/yaml-patch/container.go @@ -8,20 +8,36 @@ import ( // Container is the interface for performing operations on Nodes type Container interface { - Get(key string) (*Node, error) - Set(key string, val *Node) error - Add(key string, val *Node) error - Remove(key string) error + Get(keyOrIndex string) (*Node, error) + Set(keyOrIndex string, val *Node) error + Add(keyOrIndex string, val *Node) error + Remove(keyOrIndex string) error } type nodeMap map[interface{}]*Node +func (n *nodeMap) setAtRoot(val *Node) error { + switch vt := val.Container().(type) { + case *nodeMap: + for k, v := range *vt { + (*n)[k] = v + } + } + return nil +} + func (n *nodeMap) Set(key string, val *Node) error { + if len(key) == 0 { + return n.setAtRoot(val) + } (*n)[key] = val return nil } func (n *nodeMap) Add(key string, val *Node) error { + if len(key) == 0 { + return n.setAtRoot(val) + } (*n)[key] = val return nil } diff --git a/vendor/github.com/krishicks/yaml-patch/node.go b/vendor/github.com/krishicks/yaml-patch/node.go index 4837c8a983e..ae790f0cec7 100644 --- a/vendor/github.com/krishicks/yaml-patch/node.go +++ b/vendor/github.com/krishicks/yaml-patch/node.go @@ -1,6 +1,8 @@ package yamlpatch -import "reflect" +import ( + "reflect" +) // Node holds a YAML document that has not yet been processed into a NodeMap or // NodeSlice @@ -16,9 +18,33 @@ func NewNode(raw 
*interface{}) *Node { } } +// NewNodeFromMap returns a new Node based on a map[interface{}]interface{} +func NewNodeFromMap(m map[interface{}]interface{}) *Node { + var raw interface{} + raw = m + + return &Node{ + raw: &raw, + } +} + +// NewNodeFromSlice returns a new Node based on a []interface{} +func NewNodeFromSlice(s []interface{}) *Node { + var raw interface{} + raw = s + + return &Node{ + raw: &raw, + } +} + // MarshalYAML implements yaml.Marshaler, and returns the correct interface{} // to be marshaled func (n *Node) MarshalYAML() (interface{}, error) { + if n == nil { + return nil, nil + } + if n.container != nil { return n.container, nil } @@ -41,7 +67,7 @@ func (n *Node) UnmarshalYAML(unmarshal func(interface{}) error) error { // Empty returns whether the raw value is nil func (n *Node) Empty() bool { - return *n.raw == nil + return n == nil || *n.raw == nil } // Container returns the node as a Container @@ -74,6 +100,10 @@ func (n *Node) Container() Container { // Equal compares the values of the raw interfaces that the YAML was // unmarshaled into func (n *Node) Equal(other *Node) bool { + if n == nil { + return other == nil + } + return reflect.DeepEqual(*n.raw, *other.raw) } diff --git a/vendor/github.com/krishicks/yaml-patch/operation.go b/vendor/github.com/krishicks/yaml-patch/operation.go index 69353c77d6d..8575076dba0 100644 --- a/vendor/github.com/krishicks/yaml-patch/operation.go +++ b/vendor/github.com/krishicks/yaml-patch/operation.go @@ -11,18 +11,23 @@ type Op string // Ops const ( - opAdd Op = "add" - opRemove Op = "remove" - opReplace Op = "replace" - opMove Op = "move" - opCopy Op = "copy" - opTest Op = "test" + OpAdd Op = "add" + OpRemove Op = "remove" + OpReplace Op = "replace" + OpMove Op = "move" + OpCopy Op = "copy" + OpTest Op = "test" +) + +const ( + rootPath = "/" ) // OpPath is an RFC6902 'pointer' type OpPath string // Decompose returns the pointer's components: +// "/" => [], "" // "/foo" => [], "foo" // "/foo/1" => ["foo"], 
"1" // "/foo/1/bar" => ["foo", "1"], "bar" @@ -64,17 +69,17 @@ func (o *Operation) Perform(c Container) error { var err error switch o.Op { - case opAdd: + case OpAdd: err = tryAdd(c, o) - case opRemove: + case OpRemove: err = tryRemove(c, o) - case opReplace: + case OpReplace: err = tryReplace(c, o) - case opMove: + case OpMove: err = tryMove(c, o) - case opCopy: + case OpCopy: err = tryCopy(c, o) - case opTest: + case OpTest: err = tryTest(c, o) default: err = fmt.Errorf("Unexpected op: %s", o.Op) @@ -84,6 +89,10 @@ func (o *Operation) Perform(c Container) error { } func tryAdd(doc Container, op *Operation) error { + if op.Path == rootPath { + return doc.Add("", op.Value) + } + con, key, err := findContainer(doc, &op.Path) if err != nil { return fmt.Errorf("yamlpatch add operation does not apply: doc is missing path: %s", op.Path) @@ -169,7 +178,7 @@ func tryTest(doc Container, op *Operation) error { return err } - if op.Value.Empty() && val == nil { + if op.Value == nil && val.Empty() { return nil } diff --git a/vendor/github.com/krishicks/yaml-patch/patch.go b/vendor/github.com/krishicks/yaml-patch/patch.go index 910f39eb952..ec8f11c8346 100644 --- a/vendor/github.com/krishicks/yaml-patch/patch.go +++ b/vendor/github.com/krishicks/yaml-patch/patch.go @@ -1,7 +1,9 @@ package yamlpatch import ( + "bytes" "fmt" + "io" yaml "gopkg.in/yaml.v2" ) @@ -23,38 +25,59 @@ func DecodePatch(bs []byte) (Patch, error) { // Apply returns a YAML document that has been mutated per the patch func (p Patch) Apply(doc []byte) ([]byte, error) { - var iface interface{} - err := yaml.Unmarshal(doc, &iface) - if err != nil { - return nil, fmt.Errorf("failed unmarshaling doc: %s\n\n%s", string(doc), err) - } + decoder := yaml.NewDecoder(bytes.NewReader(doc)) + buf := bytes.NewBuffer([]byte{}) + encoder := yaml.NewEncoder(buf) - var c Container - c = NewNode(&iface).Container() - - for _, op := range p { - pathfinder := NewPathFinder(c) - if op.Path.ContainsExtendedSyntax() { - paths := 
pathfinder.Find(string(op.Path)) - if paths == nil { - return nil, fmt.Errorf("could not expand pointer: %s", op.Path) + for { + var iface interface{} + err := decoder.Decode(&iface) + if err != nil { + if err == io.EOF { + break } - for _, path := range paths { - newOp := op - newOp.Path = OpPath(path) - err = newOp.Perform(c) + return nil, fmt.Errorf("failed to decode doc: %s\n\n%s", string(doc), err) + } + + var c Container + c = NewNode(&iface).Container() + + for _, op := range p { + pathfinder := NewPathFinder(c) + if op.Path.ContainsExtendedSyntax() { + paths := pathfinder.Find(string(op.Path)) + if paths == nil { + return nil, fmt.Errorf("could not expand pointer: %s", op.Path) + } + + for i := len(paths) - 1; i >= 0; i-- { + path := paths[i] + newOp := op + newOp.Path = OpPath(path) + err := newOp.Perform(c) + if err != nil { + return nil, err + } + } + } else { + err := op.Perform(c) if err != nil { return nil, err } } - } else { - err = op.Perform(c) - if err != nil { - return nil, err - } } + + err = encoder.Encode(c) + if err != nil { + return nil, fmt.Errorf("failed to encode container: %s", err) + } + } + + err := encoder.Close() + if err != nil { + return nil, err } - return yaml.Marshal(c) + return buf.Bytes(), nil } diff --git a/vendor/github.com/krishicks/yaml-patch/pathfinder.go b/vendor/github.com/krishicks/yaml-patch/pathfinder.go index 06cfb133347..3a4f27aab81 100644 --- a/vendor/github.com/krishicks/yaml-patch/pathfinder.go +++ b/vendor/github.com/krishicks/yaml-patch/pathfinder.go @@ -3,6 +3,8 @@ package yamlpatch import ( "fmt" "strings" + + yaml "gopkg.in/yaml.v2" ) // PathFinder can be used to find RFC6902-standard paths given non-standard @@ -19,6 +21,11 @@ func NewPathFinder(container Container) *PathFinder { } } +type route struct { + key string + value Container +} + // Find expands the given path into all matching paths, returning the canonical // versions of those matching paths func (p *PathFinder) Find(path string) []string { @@ 
-28,8 +35,8 @@ func (p *PathFinder) Find(path string) []string { return []string{"/"} } - routes := map[string]Container{ - "": p.root, + routes := []route { + route{"", p.root}, } for _, part := range parts[1:] { @@ -37,73 +44,72 @@ func (p *PathFinder) Find(path string) []string { } var paths []string - for k := range routes { - paths = append(paths, k) + for _, r := range routes { + paths = append(paths, r.key) } return paths } -func find(part string, routes map[string]Container) map[string]Container { - matches := map[string]Container{} +func find(part string, routes []route) (matches []route) { + for _, r := range routes { + prefix := r.key + container := r.value - for prefix, container := range routes { if part == "-" { - for k := range routes { - matches[fmt.Sprintf("%s/-", k)] = routes[k] + for _, r = range routes { + matches = append(matches, route {fmt.Sprintf("%s/-", r.key), r.value}) } - return matches + return } if kv := strings.Split(part, "="); len(kv) == 2 { - if newMatches := findAll(prefix, kv[0], kv[1], container); len(newMatches) > 0 { + decoder := yaml.NewDecoder(strings.NewReader(kv[1])) + var value interface{} + if decoder.Decode(&value) != nil { + value = kv[1] + } + + if newMatches := findAll(prefix, kv[0], value, container); len(newMatches) > 0 { matches = newMatches } continue } - if node, err := container.Get(part); err == nil { + if node, err := container.Get(part); err == nil && node != nil { path := fmt.Sprintf("%s/%s", prefix, part) - if node == nil { - matches[path] = container - } else { - matches[path] = node.Container() - } + matches = append(matches, route {path, node.Container()}) } } - return matches + return } -func findAll(prefix, findKey, findValue string, container Container) map[string]Container { +func findAll(prefix, findKey string, findValue interface{}, container Container) (matches []route) { if container == nil { return nil } if v, err := container.Get(findKey); err == nil && v != nil { - if vs, ok := 
v.Value().(string); ok && vs == findValue { - return map[string]Container{ - prefix: container, - } + if v.Value() == findValue { + return []route {route{prefix, container}} } } - matches := map[string]Container{} - switch it := container.(type) { case *nodeMap: for k, v := range *it { - for route, match := range findAll(fmt.Sprintf("%s/%s", prefix, k), findKey, findValue, v.Container()) { - matches[route] = match + for _, r := range findAll(fmt.Sprintf("%s/%s", prefix, k), findKey, findValue, v.Container()) { + matches = append(matches, r) } } case *nodeSlice: for i, v := range *it { - for route, match := range findAll(fmt.Sprintf("%s/%d", prefix, i), findKey, findValue, v.Container()) { - matches[route] = match + for _, r := range findAll(fmt.Sprintf("%s/%d", prefix, i), findKey, findValue, v.Container()) { + matches = append(matches, r) } } } - return matches + return } diff --git a/vendor/github.com/openshift/custom-resource-status/LICENSE b/vendor/github.com/openshift/custom-resource-status/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go new file mode 100644 index 00000000000..7f98c60a063 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -0,0 +1,114 @@ +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// The return value indicates if this resulted in any changes *other than* LastHeartbeatTime. +func SetStatusCondition(conditions *[]Condition, newCondition Condition) bool { + if conditions == nil { + conditions = &[]Condition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + newCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return true + } + + changed := updateCondition(existingCondition, newCondition) + existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + return changed +} + +// SetStatusConditionNoHearbeat sets the corresponding condition in conditions to newCondition +// without setting lastHeartbeatTime. 
+// The return value indicates if this resulted in any changes. +func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) bool { + if conditions == nil { + conditions = &[]Condition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return true + } + + return updateCondition(existingCondition, newCondition) +} + +// RemoveStatusCondition removes the corresponding conditionType from conditions. +func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) { + if conditions == nil { + return + } + newConditions := []Condition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +func updateCondition(existingCondition *Condition, newCondition Condition) bool { + changed := false + if existingCondition.Status != newCondition.Status { + changed = true + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + if existingCondition.Reason != newCondition.Reason { + changed = true + existingCondition.Reason = newCondition.Reason + } + if existingCondition.Message != newCondition.Message { + changed = true + existingCondition.Message = newCondition.Message + } + return changed +} + +// FindStatusCondition finds the conditionType in conditions. 
+func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue` +func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue) +} + +// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse` +func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse) +} + +// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown` +func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. 
+func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go new file mode 100644 index 00000000000..b657efeaa65 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1 provides version v1 of the types and functions necessary to +// manage and inspect a slice of conditions. It is opinionated in the +// condition types provided but leaves it to the user to define additional +// types as necessary. +package v1 diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go new file mode 100644 index 00000000000..950678fb970 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go @@ -0,0 +1,51 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Condition represents the state of the operator's +// reconciliation functionality. +// +k8s:deepcopy-gen=true +type Condition struct { + Type ConditionType `json:"type" description:"type of condition ie. 
Available|Progressing|Degraded."` + + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` + + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"` + + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"` +} + +// ConditionType is the state of the operator's reconciliation functionality. +type ConditionType string + +const ( + // ConditionAvailable indicates that the resources maintained by the operator, + // is functional and available in the cluster. + ConditionAvailable ConditionType = "Available" + + // ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the + // operator + ConditionProgressing ConditionType = "Progressing" + + // ConditionDegraded indicates that the resources maintained by the operator are not functioning completely. + // An example of a degraded state would be if not all pods in a deployment were running. + // It may still be available, but it is degraded + ConditionDegraded ConditionType = "Degraded" + + // ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade. + // When `False`, the resources maintained by the operator should not be upgraded and the + // message field should contain a human readable description of what the administrator should do to + // allow the operator to successfully update the resources maintained by the operator. 
+ ConditionUpgradeable ConditionType = "Upgradeable" +) diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bbbbf863d13 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go @@ -0,0 +1,23 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go index ec5431eef37..de10b119ed4 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go @@ -276,6 +276,10 @@ type ClusterPoolReference struct { // ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for // ClusterDeployments belonging to ClusterPools. ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"` + // CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment. + // The Customization exists in the ClusterPool namespace. 
+ // +optional + CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"` } // ClusterMetadata contains metadata information about the installed cluster. diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go new file mode 100644 index 00000000000..8917e756c51 --- /dev/null +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go @@ -0,0 +1,99 @@ +package v1 + +import ( + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // CustomizationApplyReasonSucceeded indicates that the customization + // worked properly on the last applied cluster deployment. + CustomizationApplyReasonSucceeded = "Succeeded" + // CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply + // customization patches on install-config. More details would be found in + // ApplySucceded condition message. + CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax" + // CustomizationApplyReasonBrokenCloud indicates that cluster deployment provision has failed + // when using this customization. More details would be found in the ApplySucceeded condition message. + CustomizationApplyReasonBrokenCloud = "BrokenByCloud" + // CustomizationApplyReasonInstallationPending indicates that the customization patches have + // been successfully applied but provisioning is not completed yet. + CustomizationApplyReasonInstallationPending = "InstallationPending" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API. 
+// +kubebuilder:subresource:status +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Namespaced +type ClusterDeploymentCustomization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterDeploymentCustomizationSpec `json:"spec"` + Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"` +} + +// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization. +type ClusterDeploymentCustomizationSpec struct { + // InstallConfigPatches is a list of patches to be applied to the install-config. + InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"` +} + +// PatchEntity represent a json patch (RFC 6902) to be applied to the install-config +type PatchEntity struct { + // Op is the operation to perform: add, remove, replace, move, copy, test + // +required + Op string `json:"op"` + // Path is the json path to the value to be modified + // +required + Path string `json:"path"` + // From is the json path to copy or move the value from + // +optional + From string `json:"from,omitempty"` + // Value is the value to be used in the operation + // +required + Value string `json:"value"` +} + +// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization. +type ClusterDeploymentCustomizationStatus struct { + // ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on. + // +optional + ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"` + + // ClusterPoolRef is the name of the current cluster pool the CDC used at. + // +optional + ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"` + + // LastAppliedConfiguration contains the last applied patches to the install-config. + // The information will retain for reference in case the customization is updated. 
+ // +optional + LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"` + + // Conditions describes the state of the operator's reconciliation functionality. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +const ( + ApplySucceededCondition conditionsv1.ConditionType = "ApplySucceeded" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations. +type ClusterDeploymentCustomizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterDeploymentCustomization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{}) +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go index 71e394a212c..37efd25a558 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go @@ -92,6 +92,11 @@ type ClusterPoolSpec struct { // HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool. // +optional HibernationConfig *HibernationConfig `json:"hibernationConfig"` + + // Inventory maintains a list of entries consumed by the ClusterPool + // to customize the default ClusterDeployment. + // +optional + Inventory []InventoryEntry `json:"inventory,omitempty"` } type HibernationConfig struct { @@ -110,6 +115,22 @@ type HibernationConfig struct { ResumeTimeout metav1.Duration `json:"resumeTimeout"` } +// InventoryEntryKind is the Kind of the inventory entry. 
+// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization
+type InventoryEntryKind string
+
+const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization"
+
+// InventoryEntry maintains a reference to a custom resource consumed by a clusterpool to customize the cluster deployment.
+type InventoryEntry struct {
+	// Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value.
+	// +kubebuilder:default=ClusterDeploymentCustomization
+	Kind InventoryEntryKind `json:"kind,omitempty"`
+	// Name is the name of the referenced resource.
+	// +required
+	Name string `json:"name,omitempty"`
+}
+
 // ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool.
 type ClusterPoolClaimLifetime struct {
 	// Default is the default lifetime of the claim when no lifetime is set on the claim itself.
@@ -197,6 +218,17 @@ const (
 	// ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready)
 	// ClusterDeployments in the pool match the current configuration of the ClusterPool.
 	ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent"
+	// ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid
+	ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid"
+)
+
+const (
+	// InventoryReasonValid is used when all ClusterDeploymentCustomizations are
+	// available and, when used, the ClusterDeployments are successfully installed.
+	InventoryReasonValid = "Valid"
+	// InventoryReasonInvalid is used when there is something wrong with a ClusterDeploymentCustomization, for example
+	// a patching issue, a provisioning failure, a missing resource, etc.
+ InventoryReasonInvalid = "Invalid" ) // +genclient diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index 2f96e248910..ac12a798fc7 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -7,6 +7,7 @@ package v1 import ( configv1 "github.com/openshift/api/config/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" agent "github.com/openshift/hive/apis/hive/v1/agent" alibabacloud "github.com/openshift/hive/apis/hive/v1/alibabacloud" aws "github.com/openshift/hive/apis/hive/v1/aws" @@ -676,6 +677,121 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization. +func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterDeploymentCustomization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList. +func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) { + *out = *in + if in.InstallConfigPatches != nil { + in, out := &in.InstallConfigPatches, &out.InstallConfigPatches + *out = make([]PatchEntity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec. +func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) { + *out = *in + if in.ClusterDeploymentRef != nil { + in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ClusterPoolRef != nil { + in, out := &in.ClusterPoolRef, &out.ClusterPoolRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]conditionsv1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus. +func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus { + if in == nil { + return nil + } + out := new(ClusterDeploymentCustomizationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) { *out = *in @@ -1341,6 +1457,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) { in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp *out = (*in).DeepCopy() } + if in.CustomizationRef != nil { + in, out := &in.CustomizationRef, &out.CustomizationRef + *out = new(corev1.LocalObjectReference) + **out = **in + } return } @@ -1413,6 +1534,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) { *out = new(HibernationConfig) **out = **in } + if in.Inventory != nil { + in, out := &in.Inventory, &out.Inventory + *out = make([]InventoryEntry, len(*in)) + copy(*out, *in) + } return } @@ -2500,6 +2626,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry. +func (in *InventoryEntry) DeepCopy() *InventoryEntry { + if in == nil { + return nil + } + out := new(InventoryEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) { *out = *in @@ -2989,6 +3131,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchEntity) DeepCopyInto(out *PatchEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity. 
+func (in *PatchEntity) DeepCopy() *PatchEntity { + if in == nil { + return nil + } + out := new(PatchEntity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in diff --git a/vendor/modules.txt b/vendor/modules.txt index 58a75f8bc84..d8c65f9c2ad 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -681,7 +681,7 @@ github.com/kisielk/gotool github.com/kisielk/gotool/internal/load # github.com/kr/pty v1.1.8 ## explicit; go 1.12 -# github.com/krishicks/yaml-patch v0.0.10 +# github.com/krishicks/yaml-patch v0.0.11-0.20201210192933-7cea92d7f43e ## explicit github.com/krishicks/yaml-patch # github.com/kulti/thelper v0.4.0 @@ -917,6 +917,9 @@ github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1 ## explicit; go 1.16 github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1 github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1beta1 +# github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 +## explicit; go 1.12 +github.com/openshift/custom-resource-status/conditions/v1 # github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480 ## explicit; go 1.13 github.com/openshift/generic-admission-server/pkg/apiserver