ZapScan-Integration-Example-Jenkinsfile
// Wait timeout in minutes
def WAIT_TIMEOUT = 10
// Edit your app's name below
def APP_NAME = 'angular-app'
// Edit your environment TAG names below
def TAG_NAMES = ['dev', 'test', 'prod']
// You shouldn't have to edit these if you're following the conventions
def BUILD_CONFIG = "${APP_NAME}-build"
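// For example, with APP_NAME = 'angular-app' this resolves to 'angular-app-build'.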
// EDIT LINE BELOW (Change `IMAGESTREAM_NAME` so it matches the name of your *output*/deployable image stream.)
def IMAGESTREAM_NAME = 'angular-on-nginx'
// You'll need to change this to point to your application component's folder within your repository.
def CONTEXT_DIRECTORY = 'tob-web'
// EDIT LINE BELOW
// Add a reference to the RUNTIME_BUILD_CONFIG, if you are using a runtime that needs to be built.
// Otherwise comment out the line and the associated build script.
def RUNTIME_BUILD_CONFIG = 'nginx-runtime'
// EDIT LINE BELOW (Add a reference to the CHAINED_BUILD_CONFIG)
def CHAINED_BUILD_CONFIG = 'angular-on-nginx-build'
// The name of your deployment configuration; used to verify the deployment
def DEPLOYMENT_CONFIG_NAME = 'angular-on-nginx'
// The namespace of your dev deployment environment.
def DEV_NAME_SPACE = 'devex-von-dev'
@NonCPS
boolean triggerBuild(String contextDirectory) {
// Determine if code has changed within the source context directory.
def changeLogSets = currentBuild.changeSets
def filesChangeCnt = 0
for (int i = 0; i < changeLogSets.size(); i++) {
def entries = changeLogSets[i].items
for (int j = 0; j < entries.length; j++) {
def entry = entries[j]
def files = new ArrayList(entry.affectedFiles)
for (int k = 0; k < files.size(); k++) {
def file = files[k]
def filePath = file.path
        if (filePath.contains(contextDirectory)) {
          filesChangeCnt = 1
          // Force the loop indices to their bounds to break out of the nested
          // loops; a single matching file is enough to know a build is required.
          k = files.size()
          j = entries.length
        }
}
}
}
if ( filesChangeCnt < 1 ) {
echo('The changes do not require a build.')
return false
}
else {
echo('The changes require a build.')
return true
}
}
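// Example: with CONTEXT_DIRECTORY = 'tob-web', triggerBuild('tob-web') returns
// true only when a commit in the change sets touches a path containing 'tob-web'.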
// Get an image's hash tag
String getImageTagHash(String imageName, String tag = "") {
if(!tag?.trim()) {
tag = "latest"
}
def istag = openshift.raw("get istag ${imageName}:${tag} -o template --template='{{.image.dockerImageReference}}'")
return istag.out.tokenize('@')[1].trim()
}
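// For illustration, given a hypothetical image reference such as
//   docker-registry.default.svc:5000/devex-von-tools/angular-on-nginx@sha256:1a2b3c...
// the template returns the full reference, and tokenize('@')[1] extracts the
// immutable digest ('sha256:1a2b3c...') used below to tag deployments.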
void build(String buildConfigName, int waitTimeout) {
// Find all of the build configurations associated to the application ...
def buildconfigs = openshift.selector("bc", "${buildConfigName}")
echo "Found ${buildconfigs.count()} buildconfigs: ${buildconfigs.names()}"
// Kick off all the builds in parallel ...
def builds = buildconfigs.startBuild()
echo "Started ${builds.count()} builds: ${builds.names()}"
timeout(waitTimeout) {
// Wait for all the builds to complete ...
// This section will exit after the last build completes.
echo "Waiting for builds to complete ..."
builds.withEach {
      // untilEach and watch do not support watching multiple named resources,
      // so we have to feed it one at a time.
it.untilEach(1) {
echo "${it.object().status.phase} - ${it.name()}"
return (it.object().status.phase == "Complete")
}
}
}
echo "Builds complete ..."
}
node {
if( triggerBuild(CONTEXT_DIRECTORY) ) {
stage("Build ${BUILD_CONFIG}") {
script {
openshift.withCluster() {
openshift.withProject() {
echo "Building the application artifacts ..."
build("${BUILD_CONFIG}", WAIT_TIMEOUT)
}
}
}
}
  // Build the runtime image, if you are not using an off-the-shelf one.
stage("Build ${RUNTIME_BUILD_CONFIG}") {
script {
openshift.withCluster() {
openshift.withProject() {
echo "Building the ${RUNTIME_BUILD_CONFIG} image ..."
build("${RUNTIME_BUILD_CONFIG}", WAIT_TIMEOUT)
}
}
}
}
stage("Build ${IMAGESTREAM_NAME}") {
script {
openshift.withCluster() {
openshift.withProject() {
echo "Building the ${IMAGESTREAM_NAME} image ..."
build("${CHAINED_BUILD_CONFIG}", WAIT_TIMEOUT)
}
}
}
}
stage("Deploy ${TAG_NAMES[0]}") {
script {
openshift.withCluster() {
openshift.withProject() {
echo "Tagging ${IMAGESTREAM_NAME} for deployment to ${TAG_NAMES[0]} ..."
          // Don't tag with BUILD_ID so the pruner can do its job; it won't delete tagged images.
// Tag the images for deployment based on the image's hash
def IMAGE_HASH = getImageTagHash("${IMAGESTREAM_NAME}")
echo "IMAGE_HASH: ${IMAGE_HASH}"
openshift.tag("${IMAGESTREAM_NAME}@${IMAGE_HASH}", "${IMAGESTREAM_NAME}:${TAG_NAMES[0]}")
}
openshift.withProject("${DEV_NAME_SPACE}") {
def dc = openshift.selector('dc', "${DEPLOYMENT_CONFIG_NAME}")
// Wait for the deployment to complete.
// This will wait until the desired replicas are all available
dc.rollout().status()
}
echo "Deployment Complete."
}
}
}
stage('Trigger ZAP Scan') {
script {
openshift.withCluster() {
openshift.withProject() {
echo "Triggering an asynchronous ZAP Scan ..."
def zapScan = openshift.selector("bc", "zap-pipeline")
zapScan.startBuild()
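          // startBuild returns without waiting for completion; the ZAP scan runs
          // in its own pipeline, so this build is not blocked on the scan results.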
}
}
}
}
}
else {
stage('No Changes') {
echo "No changes ..."
currentBuild.result = 'SUCCESS'
}
}
}