Commit e41a0ac

Merge pull request #19033 from coreydaley/github_7102_database_replication_tests_should_work_remotely
Automatic merge from submit-queue (batch tested with PRs 19057, 19033).

Updates the DB replica extended tests to use NFS-backed persistent volumes. This pull request builds on the NFS-backed persistent volumes introduced in #18708 for the Jenkins extended tests. The main goal is to remove the dependency on host-path persistent volumes in our tests so that they can run in parallel across a cluster.

Fixes #7102
2 parents 4eca6da + b8ea89f commit e41a0ac
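
All of the diffs below move the database replication suites to the same setup/teardown pattern: provision NFS-backed persistent volumes in BeforeEach and tear down both the volumes and the workloads bound to them in AfterEach. The sketch below only illustrates that pattern; the exutil helper signatures are inferred from the call sites in the diffs, and the suite and DeploymentConfig names are placeholders.

package image_ecosystem

import (
	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

// Illustrative scaffolding only; the real suites are in the diffs that follow.
var _ = g.Describe("[image_ecosystem] example replication scaffolding", func() {
	defer g.GinkgoRecover()

	// One CLI per suite; the per-iteration NewCLI calls are dropped in this PR.
	var oc = exutil.NewCLI("example-replication", exutil.KubeConfigPath())

	g.Context("", func() {
		g.BeforeEach(func() {
			// Provision NFS-backed PVs (capacity, count) instead of host-path volumes.
			_, err := exutil.SetupNFSBackedPersistentVolumes(oc, "1Gi", 2)
			o.Expect(err).NotTo(o.HaveOccurred())
		})

		g.AfterEach(func() {
			// Remove the PVs and the DeploymentConfigs bound to them so namespace cleanup can finish.
			defer exutil.RemoveNFSBackedPersistentVolumes(oc)
			defer exutil.RemoveDeploymentConfigs(oc, "example-master", "example-slave")
		})
	})
})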

10 files changed: +255 additions, -256 deletions


test/extended/builds/pipeline.go

Lines changed: 3 additions & 2 deletions
@@ -93,7 +93,7 @@ var _ = g.Describe("[Feature:Builds][Slow] openshift pipeline build", func() {
 
 			// create persistent volumes if running persistent jenkins
 			if jenkinsTemplatePath == jenkinsPersistentTemplatePath {
-				_, err := exutil.SetupNFSBackedPersistentVolume(oc, "2Gi")
+				_, err := exutil.SetupNFSBackedPersistentVolumes(oc, "2Gi", 1)
 				o.Expect(err).NotTo(o.HaveOccurred())
 			}
 
@@ -998,7 +998,8 @@ var _ = g.Describe("[Feature:Builds][Slow] openshift pipeline build", func() {
 		})
 
 		g.AfterEach(func() {
-			defer exutil.RemoveNFSBackedPersistentVolume(oc)
+			defer exutil.RemoveDeploymentConfigs(oc, "jenkins")
+			defer exutil.RemoveNFSBackedPersistentVolumes(oc)
 
 			if g.CurrentGinkgoTestDescription().Failed {
 				exutil.DumpPodStates(oc)

test/extended/image_ecosystem/mongodb_replica_statefulset.go

Lines changed: 82 additions & 87 deletions
@@ -10,6 +10,7 @@ import (
 	exutil "github.com/openshift/origin/test/extended/util"
 	dbutil "github.com/openshift/origin/test/extended/util/db"
 	kapiv1 "k8s.io/api/core/v1"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
 )
 
 var _ = g.Describe("[Conformance][image_ecosystem][mongodb][Slow] openshift mongodb replication (with statefulset)", func() {
@@ -22,103 +23,97 @@ var _ = g.Describe("[Conformance][image_ecosystem][mongodb][Slow] openshift mong
 	g.Context("", func() {
 		g.BeforeEach(func() {
 			exutil.DumpDockerInfo()
+			_, err := exutil.SetupNFSBackedPersistentVolumes(oc, "256Mi", 3)
+			o.Expect(err).NotTo(o.HaveOccurred())
 		})
 
 		g.AfterEach(func() {
+			defer exutil.RemoveNFSBackedPersistentVolumes(oc)
+			defer exutil.RemoveStatefulSets(oc, "mongodb-replicaset")
+
 			if g.CurrentGinkgoTestDescription().Failed {
 				exutil.DumpPodStates(oc)
				exutil.DumpPodLogsStartingWith("", oc)
 			}
-		})
-
-		g.Describe("creating from a template", func() {
-			g.AfterEach(func() {
-				for i := 0; i < 3; i++ {
-					pod := fmt.Sprintf("mongodb-replicaset-%d", i)
-					podLogs, err := oc.Run("logs").Args(pod, "--timestamps").Output()
-					if err != nil {
-						ginkgolog("error retrieving pod logs for %s: %v", pod, err)
-						continue
-					}
-					ginkgolog("pod logs for %s:\n%s", podLogs, err)
-				}
-			})
-			g.It(fmt.Sprintf("should instantiate the template"), func() {
-				oc.SetOutputDir(exutil.TestContext.OutputDir)
-
-				g.By("creating persistent volumes")
-				_, err := exutil.SetupHostPathVolumes(oc, "256Mi", 3)
-				o.Expect(err).NotTo(o.HaveOccurred())
-				defer exutil.RemoveHostPathVolumes(oc)
-
-				g.By("creating a new app")
-				o.Expect(
-					oc.Run("new-app").Args(
-						"-f", templatePath,
-						"-p", "VOLUME_CAPACITY=256Mi",
-						"-p", "MEMORY_LIMIT=512Mi",
-						"-p", "MONGODB_IMAGE=centos/mongodb-32-centos7",
-						"-p", "MONGODB_SERVICE_NAME=mongodb-replicaset",
-					).Execute(),
-				).Should(o.Succeed())
-
-				g.By("waiting for all pods to reach ready status")
-				podNames, err := exutil.WaitForPods(
-					oc.KubeClient().Core().Pods(oc.Namespace()),
-					exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
-					exutil.CheckPodIsReadyFn,
-					3,
-					8*time.Minute,
-				)
+			for i := 0; i < 3; i++ {
+				podLogs, err := oc.Run("logs").Args(fmt.Sprintf("mongodb-replicaset-%d", i), "--timestamps").Output()
 				if err != nil {
-					desc, _ := oc.Run("describe").Args("statefulset").Output()
-					ginkgolog("\n\nStatefulset at failure:\n%s\n\n", desc)
-					desc, _ = oc.Run("describe").Args("pods").Output()
-					ginkgolog("\n\nPods at statefulset failure:\n%s\n\n", desc)
-				}
-				o.Expect(err).NotTo(o.HaveOccurred())
-
-				g.By("expecting that we can insert a new record on primary node")
-				mongo := dbutil.NewMongoDB(podNames[0])
-				replicaSet := mongo.(exutil.ReplicaSet)
-				out, err := replicaSet.QueryPrimary(oc, `db.test.save({ "status" : "passed" })`)
-				ginkgolog("save result: %s\n", out)
-				o.Expect(err).ShouldNot(o.HaveOccurred())
-
-				g.By("expecting that we can read a record from all members")
-				for _, podName := range podNames {
-					o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
+					e2e.Logf("error retrieving pod logs for %s: %v", fmt.Sprintf("mongodb-replicaset-%d", i), err)
+					continue
 				}
+				e2e.Logf("pod logs for %s:\n%s", podLogs, err)
+			}
+		})
+		g.It(fmt.Sprintf("should instantiate the template"), func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			g.By("creating a new app")
+			o.Expect(
+				oc.Run("new-app").Args(
+					"-f", templatePath,
+					"-p", "VOLUME_CAPACITY=256Mi",
+					"-p", "MEMORY_LIMIT=512Mi",
+					"-p", "MONGODB_IMAGE=centos/mongodb-32-centos7",
+					"-p", "MONGODB_SERVICE_NAME=mongodb-replicaset",
+				).Execute(),
+			).Should(o.Succeed())
+
+			g.By("waiting for all pods to reach ready status")
+			podNames, err := exutil.WaitForPods(
+				oc.KubeClient().Core().Pods(oc.Namespace()),
+				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
+				exutil.CheckPodIsReadyFn,
+				3,
+				8*time.Minute,
+			)
+			if err != nil {
+				desc, _ := oc.Run("describe").Args("statefulset").Output()
+				e2e.Logf("\n\nStatefulset at failure:\n%s\n\n", desc)
+				desc, _ = oc.Run("describe").Args("pods").Output()
+				e2e.Logf("\n\nPods at statefulset failure:\n%s\n\n", desc)
+			}
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("expecting that we can insert a new record on primary node")
+			mongo := dbutil.NewMongoDB(podNames[0])
+			replicaSet := mongo.(exutil.ReplicaSet)
+			out, err := replicaSet.QueryPrimary(oc, `db.test.save({ "status" : "passed" })`)
+			e2e.Logf("save result: %s\n", out)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+
+			g.By("expecting that we can read a record from all members")
+			for _, podName := range podNames {
+				o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
+			}
 
-				g.By("restarting replica set")
-				err = oc.Run("delete").Args("pods", "--all", "-n", oc.Namespace()).Execute()
-				o.Expect(err).ShouldNot(o.HaveOccurred())
-
-				g.By("waiting for all pods to be gracefully deleted")
-				podNames, err = exutil.WaitForPods(
-					oc.KubeClient().Core().Pods(oc.Namespace()),
-					exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
-					func(pod kapiv1.Pod) bool { return pod.DeletionTimestamp != nil },
-					0,
-					4*time.Minute,
-				)
-				o.Expect(err).NotTo(o.HaveOccurred())
-
-				g.By("waiting for all pods to reach ready status")
-				podNames, err = exutil.WaitForPods(
-					oc.KubeClient().Core().Pods(oc.Namespace()),
-					exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
-					exutil.CheckPodIsReadyFn,
-					3,
-					4*time.Minute,
-				)
-				o.Expect(err).NotTo(o.HaveOccurred())
-
-				g.By("expecting that we can read a record from all members after its restart")
-				for _, podName := range podNames {
-					o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
-				}
-			})
+			g.By("restarting replica set")
+			err = exutil.RemovePodsWithPrefixes(oc, "mongodb-replicaset")
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+
+			g.By("waiting for all pods to be gracefully deleted")
+			podNames, err = exutil.WaitForPods(
+				oc.KubeClient().Core().Pods(oc.Namespace()),
+				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
+				func(pod kapiv1.Pod) bool { return pod.DeletionTimestamp != nil },
+				0,
+				4*time.Minute,
+			)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for all pods to reach ready status")
+			podNames, err = exutil.WaitForPods(
+				oc.KubeClient().Core().Pods(oc.Namespace()),
+				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
+				exutil.CheckPodIsReadyFn,
+				3,
+				4*time.Minute,
+			)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("expecting that we can read a record from all members after its restart")
+			for _, podName := range podNames {
+				o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
+			}
 		})
 	})
 })
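
The restart step above now uses exutil.RemovePodsWithPrefixes, and cleanup uses exutil.RemoveStatefulSets; both are helpers added elsewhere in this PR and not shown in this excerpt. Purely as an illustration of what a prefix-based pod cleanup of this shape can look like (not the actual implementation), using the same client APIs the other helpers in this PR use:

package util

import (
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
	e2e "k8s.io/kubernetes/test/e2e/framework"
)

// removePodsWithPrefixes is a sketch of a prefix-based pod cleanup; the real
// exutil.RemovePodsWithPrefixes helper may differ.
func removePodsWithPrefixes(oc *CLI, prefixes ...string) error {
	errs := []error{}
	pods, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, prefix := range prefixes {
		for _, pod := range pods.Items {
			if !strings.HasPrefix(pod.Name, prefix) {
				continue
			}
			e2e.Logf("Removing pod %s/%s", oc.Namespace(), pod.Name)
			if err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Delete(pod.Name, &metav1.DeleteOptions{}); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return kutilerrors.NewAggregate(errs)
}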

test/extended/image_ecosystem/mysql_replica.go

Lines changed: 9 additions & 8 deletions
@@ -65,12 +65,8 @@ func CreateMySQLReplicationHelpers(c kcoreclient.PodInterface, masterDeployment,
 func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
 	return func() {
 		oc.SetOutputDir(exutil.TestContext.OutputDir)
-		defer exutil.RemoveHostPathVolumes(oc)
 
-		_, err := exutil.SetupHostPathVolumes(oc, "1Gi", 5)
-		o.Expect(err).NotTo(o.HaveOccurred())
-
-		err = testutil.WaitForPolicyUpdate(oc.InternalKubeClient().Authorization(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
+		err := testutil.WaitForPolicyUpdate(oc.InternalKubeClient().Authorization(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
 		o.Expect(err).NotTo(o.HaveOccurred())
 
 		exutil.CheckOpenShiftNamespaceImageStreams(oc)
@@ -189,21 +185,26 @@ func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
 var _ = g.Describe("[image_ecosystem][mysql][Slow] openshift mysql replication", func() {
 	defer g.GinkgoRecover()
 
-	var oc *exutil.CLI
+	var oc = exutil.NewCLI("mysql-replication", exutil.KubeConfigPath())
 	g.Context("", func() {
 		g.BeforeEach(func() {
 			exutil.DumpDockerInfo()
+
+			_, err := exutil.SetupNFSBackedPersistentVolumes(oc, "1Gi", 5)
+			o.Expect(err).NotTo(o.HaveOccurred())
 		})
 
 		g.AfterEach(func() {
+			defer exutil.RemoveNFSBackedPersistentVolumes(oc)
+			defer exutil.RemoveDeploymentConfigs(oc, "mysql-master", "mysql-slave")
+
 			if g.CurrentGinkgoTestDescription().Failed {
 				exutil.DumpPodStates(oc)
 				exutil.DumpPodLogsStartingWith("", oc)
 			}
 		})
 
-		for i, tc := range testCases {
-			oc = exutil.NewCLI(fmt.Sprintf("mysql-replication-%d", i), exutil.KubeConfigPath())
+		for _, tc := range testCases {
 			g.It(fmt.Sprintf("MySQL replication template for %s: %s", tc.Version, tc.TemplatePath), replicationTestFactory(oc, tc))
 		}
 	})

test/extended/image_ecosystem/postgresql_replica.go

Lines changed: 10 additions & 10 deletions
@@ -29,25 +29,29 @@ var (
 var _ = g.Describe("[image_ecosystem][postgresql][Slow][local] openshift postgresql replication", func() {
 	defer g.GinkgoRecover()
 
-	var oc *exutil.CLI
+	var oc = exutil.NewCLI("postgresql-replication", exutil.KubeConfigPath())
 
 	g.Context("", func() {
 		g.BeforeEach(func() {
 			exutil.DumpDockerInfo()
+
+			_, err := exutil.SetupNFSBackedPersistentVolumes(oc, "1Gi", 5)
+			o.Expect(err).NotTo(o.HaveOccurred())
 		})
 
 		g.AfterEach(func() {
+			defer exutil.RemoveNFSBackedPersistentVolumes(oc)
+			defer exutil.RemoveDeploymentConfigs(oc, "postgresql-master", "postgresql-slave")
+
 			if g.CurrentGinkgoTestDescription().Failed {
 				exutil.DumpPodStates(oc)
 				exutil.DumpPodLogsStartingWith("", oc)
 				exutil.DumpImageStreams(oc)
 			}
 		})
 
-		for i, image := range postgreSQLImages {
-			oc = exutil.NewCLI(fmt.Sprintf("postgresql-replication-%d", i), exutil.KubeConfigPath())
-			testFn := PostgreSQLReplicationTestFactory(oc, image)
-			g.It(fmt.Sprintf("postgresql replication works for %s", image), testFn)
+		for _, image := range postgreSQLImages {
+			g.It(fmt.Sprintf("postgresql replication works for %s", image), PostgreSQLReplicationTestFactory(oc, image))
 		}
 	})
 })
@@ -82,12 +86,8 @@ func CreatePostgreSQLReplicationHelpers(c kcoreclient.PodInterface, masterDeploy
 func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
 	return func() {
 		oc.SetOutputDir(exutil.TestContext.OutputDir)
-		defer exutil.RemoveHostPathVolumes(oc)
-
-		_, err := exutil.SetupHostPathVolumes(oc, "512Mi", 3)
-		o.Expect(err).NotTo(o.HaveOccurred())
 
-		err = testutil.WaitForPolicyUpdate(oc.InternalKubeClient().Authorization(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
+		err := testutil.WaitForPolicyUpdate(oc.InternalKubeClient().Authorization(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
 		o.Expect(err).NotTo(o.HaveOccurred())
 
 		exutil.CheckOpenShiftNamespaceImageStreams(oc)
exutil.CheckOpenShiftNamespaceImageStreams(oc)
Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+package util
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// RemoveDeploymentConfigs deletes the given DeploymentConfigs in a namespace
+func RemoveDeploymentConfigs(oc *CLI, dcs ...string) error {
+	errs := []error{}
+	for _, dc := range dcs {
+		e2e.Logf("Removing deployment config %s/%s", oc.Namespace(), dc)
+		if err := oc.AdminAppsClient().Apps().DeploymentConfigs(oc.Namespace()).Delete(dc, &metav1.DeleteOptions{}); err != nil {
+			e2e.Logf("Error occurred removing deployment config: %v", err)
+			errs = append(errs, err)
+		}
+
+		err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
+			pods, err := GetApplicationPods(oc, dc)
+			if err != nil {
+				e2e.Logf("Unable to get pods for dc/%s: %v", dc, err)
+				return false, err
+			}
+			if len(pods.Items) != 0 {
+				return false, nil
+			}
+			return true, nil
+		})
+
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) != 0 {
+		return kutilerrors.NewAggregate(errs)
+	}
+
+	return nil
+}
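
The mysql, postgresql, and Jenkins pipeline suites above call this new helper from their AfterEach blocks, naming the DeploymentConfigs they own, for example:

	defer exutil.RemoveDeploymentConfigs(oc, "mysql-master", "mysql-slave")

This replaces the hard-coded Jenkins DeploymentConfig deletion that previously lived in RemoveNFSServer (removed in the nfs.go diff below).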

test/extended/util/framework.go

Lines changed: 4 additions & 0 deletions
@@ -174,6 +174,10 @@ func GetApplicationPods(oc *CLI, dcName string) (*kapiv1.PodList, error) {
 	return oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("deploymentconfig=%s", dcName)).String()})
 }
 
+func GetStatefulSetPods(oc *CLI, setName string) (*kapiv1.PodList, error) {
+	return oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("name=%s", setName)).String()})
+}
+
 // DumpDeploymentLogs will dump the latest deployment logs for a DeploymentConfig for debug purposes
 func DumpDeploymentLogs(dcName string, version int64, oc *CLI) {
 	e2e.Logf("Dumping deployment logs for deploymentconfig %q\n", dcName)

test/extended/util/nfs.go

Lines changed: 1 addition & 10 deletions
@@ -170,15 +170,6 @@ func RemoveNFSServer(oc *CLI) error {
 	if _, err := oc.AsAdmin().Run("adm").Args("policy", "remove-scc-from-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:default", oc.Namespace())).Output(); err != nil {
 		errs = append(errs, err)
 	}
-	// The DeploymentConfig in the Jenkins Persistent template uses the "Recreate" strategy, which will cause the
-	// Jenkins pod to recreate after it is deleted. Since we have to delete the persistent volume at the end of the
-	// tests that use the NFSServer backed persistent volume, this causes the Jenkins pod to stall trying to recreate
-	// without having a valid persistent volume to use, which causes the test cleanup to fail because it can't delete
-	// the namespace. So we have to delete the Jenkins DeploymentConfig manually instead of allowing the test runner
-	// to clean up the namespace.
-	e2e.Logf("Removing jenkins deployment config")
-	if err := oc.AdminAppsClient().Apps().DeploymentConfigs(oc.Namespace()).Delete("jenkins", &metav1.DeleteOptions{}); err != nil {
-		errs = append(errs, err)
-	}
+
 	return kutilerrors.NewAggregate(errs)
 }
