Run PodSecurityPolicy E2E test in parallel

Previously, this test modified a ClusterRole used by _every_ test. It therefore had to be run serially, with a special teardown function that restored the state of the ClusterRole for the other serial tests.

Now every test gets its own ClusterRole, which means this test can safely run in parallel with all the others, without any special teardown.
This commit is contained in:
Nick Novitski 2019-05-07 17:08:37 -07:00
parent bf11e2ef63
commit e1958b8272

View file

@@ -39,7 +39,7 @@ const (
ingressControllerPSP = "ingress-controller-psp"
)
var _ = framework.IngressNginxDescribe("[Serial] Pod Security Policies", func() {
var _ = framework.IngressNginxDescribe("Pod Security Policies", func() {
f := framework.NewDefaultFramework("pod-security-policies")
BeforeEach(func() {
@@ -78,30 +78,6 @@ var _ = framework.IngressNginxDescribe("[Serial] Pod Security Policies", func()
f.NewEchoDeployment()
})
AfterEach(func() {
role, err := f.KubeClientSet.RbacV1().ClusterRoles().Get(fmt.Sprintf("nginx-ingress-clusterrole-%v", f.Namespace), metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "getting ingress controller cluster role")
Expect(role).NotTo(BeNil())
index := -1
for idx, rule := range role.Rules {
found := false
for _, rn := range rule.ResourceNames {
if rn == ingressControllerPSP {
found = true
break
}
}
if found {
index = idx
}
}
role.Rules = append(role.Rules[:index], role.Rules[index+1:]...)
_, err = f.KubeClientSet.RbacV1().ClusterRoles().Update(role)
Expect(err).NotTo(HaveOccurred(), "updating ingress controller cluster role to not use a pod security policy")
})
It("should be running with a Pod Security Policy", func() {
f.WaitForNginxConfiguration(
func(cfg string) bool {