Fix status update in case of connection errors
commit fed013ab6f (parent 468872b7e9)
11 changed files with 326 additions and 107 deletions
@@ -41,19 +41,6 @@ fi

 SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..

-mkdir -p ${SCRIPT_ROOT}/test/binaries
-
-TEST_BINARIES=$( cd "${SCRIPT_ROOT}/test/binaries" ; pwd -P )
-
-export PATH=${TEST_BINARIES}:$PATH
-
-if ! [ -x "$(command -v kubectl)" ]; then
-  echo "downloading kubectl..."
-  curl -sSLo ${TEST_BINARIES}/kubectl \
-    https://storage.googleapis.com/kubernetes-release/release/v1.11.0/bin/linux/amd64/kubectl
-  chmod +x ${TEST_BINARIES}/kubectl
-fi
-
 ginkgo build ./test/e2e

 exec -- \
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM quay.io/kubernetes-ingress-controller/nginx-amd64:0.63
+FROM quay.io/kubernetes-ingress-controller/nginx-amd64:0.64

 RUN clean-install \
   g++ \
@@ -61,3 +61,7 @@ RUN luarocks install luacheck \

 RUN go get github.com/onsi/ginkgo/ginkgo \
   && go get golang.org/x/lint/golint
+
+RUN curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl \
+  && chmod +x /usr/local/bin/kubectl
+
@@ -18,7 +18,6 @@ package controller

 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -257,7 +256,7 @@ func (n *NGINXController) Start() {
 	n.store.Run(n.stopCh)

 	if n.syncStatus != nil {
-		go n.syncStatus.Run(context.Background())
+		go n.syncStatus.Run()
 	}

 	cmd := nginxExecCommand()
@@ -51,7 +51,7 @@ const (

 // Sync ...
 type Sync interface {
-	Run(ctx context.Context)
+	Run()
 	Shutdown()
 }

@@ -93,22 +93,97 @@ type statusSync struct
 	pod *k8s.PodInfo

 	elector *leaderelection.LeaderElector

 	// workqueue used to keep in sync the status IP/s
 	// in the Ingress rules
 	syncQueue *task.Queue
 }

 // Run starts the loop to keep the status in sync
-func (s statusSync) Run(ctx context.Context) {
-	s.elector.Run(ctx)
+func (s statusSync) Run() {
+	// we need to use the defined ingress class to allow multiple leaders
+	// in order to update information about ingress status
+	electionID := fmt.Sprintf("%v-%v", s.Config.ElectionID, s.Config.DefaultIngressClass)
+	if s.Config.IngressClass != "" {
+		electionID = fmt.Sprintf("%v-%v", s.Config.ElectionID, s.Config.IngressClass)
+	}
+
+	// start a new context
+	ctx := context.Background()
+	// allow to cancel the context in case we stop being the leader
+	leaderCtx, cancel := context.WithCancel(ctx)
+
+	var stopCh chan struct{}
+	callbacks := leaderelection.LeaderCallbacks{
+		OnStartedLeading: func(ctx context.Context) {
+			glog.V(2).Infof("I am the new status update leader")
+			stopCh = make(chan struct{})
+			go s.syncQueue.Run(time.Second, stopCh)
+			// trigger initial sync
+			s.syncQueue.EnqueueTask(task.GetDummyObject("sync status"))
+			// when this instance is the leader we need to enqueue
+			// an item to trigger the update of the Ingress status.
+			wait.PollUntil(updateInterval, func() (bool, error) {
+				s.syncQueue.EnqueueTask(task.GetDummyObject("sync status"))
+				return false, nil
+			}, stopCh)
+		},
+		OnStoppedLeading: func() {
+			glog.V(2).Infof("I am not status update leader anymore")
+			close(stopCh)
+
+			// cancel the context
+			cancel()
+
+			// start a new context and run the elector
+			leaderCtx, cancel = context.WithCancel(ctx)
+			go s.elector.Run(leaderCtx)
+		},
+		OnNewLeader: func(identity string) {
+			glog.Infof("new leader elected: %v", identity)
+		},
+	}
+
+	broadcaster := record.NewBroadcaster()
+	hostname, _ := os.Hostname()
+
+	recorder := broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
+		Component: "ingress-leader-elector",
+		Host:      hostname,
+	})
+
+	lock := resourcelock.ConfigMapLock{
+		ConfigMapMeta: metav1.ObjectMeta{Namespace: s.pod.Namespace, Name: electionID},
+		Client:        s.Config.Client.CoreV1(),
+		LockConfig: resourcelock.ResourceLockConfig{
+			Identity:      s.pod.Name,
+			EventRecorder: recorder,
+		},
+	}
+
+	ttl := 30 * time.Second
+	le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
+		Lock:          &lock,
+		LeaseDuration: ttl,
+		RenewDeadline: ttl / 2,
+		RetryPeriod:   ttl / 4,
+		Callbacks:     callbacks,
+	})
+	if err != nil {
+		glog.Fatalf("unexpected error starting leader election: %v", err)
+	}
+	s.elector = le
+
+	go le.Run(leaderCtx)
 }

 // Shutdown stop the sync. In case the instance is the leader it will remove the current IP
 // if there is no other instances running.
 func (s statusSync) Shutdown() {
 	go s.syncQueue.Shutdown()

 	// remove IP from Ingress
-	if !s.elector.IsLeader() {
+	if s.elector != nil && !s.elector.IsLeader() {
 		return
 	}

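The heart of the fix is visible in the hunk above: leader election is now built inside Run() and restarted from OnStoppedLeading with a fresh context, so a dropped apiserver connection no longer leaves the controller permanently without a status-update leader. A minimal sketch of that restart pattern, assuming only client-go's leaderelection package; newElector is a hypothetical stand-in for the ConfigMapLock/LeaderElectionConfig wiring shown above:

package example

import (
	"context"
	"log"

	"k8s.io/client-go/tools/leaderelection"
)

// runWithReelection keeps campaigning for leadership forever. When leadership
// is lost (for example after a connection error), the old context is cancelled
// and the same elector is started again with a fresh one.
func runWithReelection(newElector func(leaderelection.LeaderCallbacks) *leaderelection.LeaderElector) {
	ctx := context.Background()
	leaderCtx, cancel := context.WithCancel(ctx)

	var elector *leaderelection.LeaderElector
	callbacks := leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			log.Println("became leader: start pushing status updates")
		},
		OnStoppedLeading: func() {
			log.Println("lost leadership: restarting the elector")
			cancel()
			leaderCtx, cancel = context.WithCancel(ctx)
			go elector.Run(leaderCtx)
		},
		OnNewLeader: func(identity string) {
			log.Printf("current leader: %v", identity)
		},
	}

	elector = newElector(callbacks)
	elector.Run(leaderCtx)
}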
@@ -146,6 +221,10 @@ func (s *statusSync) sync(key interface{}) error {
 		return nil
 	}

+	if s.elector != nil && !s.elector.IsLeader() {
+		return fmt.Errorf("i am not the current leader. Skiping status update")
+	}
+
 	addrs, err := s.runningAddresses()
 	if err != nil {
 		return err
@@ -173,66 +252,6 @@ func NewStatusSyncer(config Config) Sync {
 	}
 	st.syncQueue = task.NewCustomTaskQueue(st.sync, st.keyfunc)

-	// we need to use the defined ingress class to allow multiple leaders
-	// in order to update information about ingress status
-	electionID := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass)
-	if config.IngressClass != "" {
-		electionID = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass)
-	}
-
-	var stopCh chan struct{}
-	callbacks := leaderelection.LeaderCallbacks{
-		OnStartedLeading: func(ctx context.Context) {
-			glog.V(2).Infof("I am the new status update leader")
-			stopCh = make(chan struct{})
-			go st.syncQueue.Run(time.Second, stopCh)
-			// when this instance is the leader we need to enqueue
-			// an item to trigger the update of the Ingress status.
-			wait.PollUntil(updateInterval, func() (bool, error) {
-				st.syncQueue.EnqueueTask(task.GetDummyObject("sync status"))
-				return false, nil
-			}, stopCh)
-		},
-		OnStoppedLeading: func() {
-			glog.V(2).Infof("I am not status update leader anymore")
-			close(stopCh)
-		},
-		OnNewLeader: func(identity string) {
-			glog.Infof("new leader elected: %v", identity)
-		},
-	}
-
-	broadcaster := record.NewBroadcaster()
-	hostname, _ := os.Hostname()
-
-	recorder := broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
-		Component: "ingress-leader-elector",
-		Host:      hostname,
-	})
-
-	lock := resourcelock.ConfigMapLock{
-		ConfigMapMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: electionID},
-		Client:        config.Client.CoreV1(),
-		LockConfig: resourcelock.ResourceLockConfig{
-			Identity:      pod.Name,
-			EventRecorder: recorder,
-		},
-	}
-
-	ttl := 30 * time.Second
-	le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
-		Lock:          &lock,
-		LeaseDuration: ttl,
-		RenewDeadline: ttl / 2,
-		RetryPeriod:   ttl / 4,
-		Callbacks:     callbacks,
-	})
-
-	if err != nil {
-		glog.Fatalf("unexpected error starting leader election: %v", err)
-	}
-
-	st.elector = le
 	return st
 }

@@ -333,6 +352,13 @@ func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) {
 	sort.SliceStable(newIngressPoint, lessLoadBalancerIngress(newIngressPoint))

 	for _, ing := range ings {
+		curIPs := ing.Status.LoadBalancer.Ingress
+		sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs))
+		if ingressSliceEqual(curIPs, newIngressPoint) {
+			glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name)
+			continue
+		}
+
 		batch.Queue(runUpdate(ing, newIngressPoint, s.Client))
 	}

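The lines added inside the loop skip any Ingress whose published addresses already equal the desired set. A standalone sketch of that order-insensitive comparison, assuming only the core/v1 types; lessLoadBalancerIngress and ingressSliceEqual in status.go perform the equivalent steps:

package example

import (
	"sort"

	apiv1 "k8s.io/api/core/v1"
)

// sameIngressPoints reports whether the currently published addresses already
// equal the desired ones, ignoring order, so the status update can be skipped.
func sameIngressPoints(current, desired []apiv1.LoadBalancerIngress) bool {
	byAddress := func(addrs []apiv1.LoadBalancerIngress) func(int, int) bool {
		return func(i, j int) bool {
			if addrs[i].IP != addrs[j].IP {
				return addrs[i].IP < addrs[j].IP
			}
			return addrs[i].Hostname < addrs[j].Hostname
		}
	}
	sort.SliceStable(current, byAddress(current))
	sort.SliceStable(desired, byAddress(desired))

	if len(current) != len(desired) {
		return false
	}
	for i := range current {
		if current[i].IP != desired[i].IP || current[i].Hostname != desired[i].Hostname {
			return false
		}
	}
	return true
}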
@@ -347,14 +373,6 @@ func runUpdate(ing *extensions.Ingress, status []apiv1.LoadBalancerIngress,
 		return nil, nil
 	}

-	curIPs := ing.Status.LoadBalancer.Ingress
-	sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs))
-
-	if ingressSliceEqual(status, curIPs) {
-		glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name)
-		return true, nil
-	}
-
 	ingClient := client.ExtensionsV1beta1().Ingresses(ing.Namespace)

 	currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
@@ -398,5 +416,6 @@ func ingressSliceEqual(lhs, rhs []apiv1.LoadBalancerIngress) bool {
 			return false
 		}
 	}
+
 	return true
 }
@@ -17,7 +17,6 @@ limitations under the License.
 package status

 import (
-	"context"
 	"os"
 	"testing"
 	"time"
@@ -298,7 +297,7 @@ func TestStatusActions(t *testing.T) {
 	fk := fkSync.(statusSync)

 	// start it and wait for the election and syn actions
-	go fk.Run(context.Background())
+	go fk.Run()
 	// wait for the election
 	time.Sleep(100 * time.Millisecond)
 	// execute sync
@@ -183,8 +183,7 @@ func execInfluxDBCommand(pod *corev1.Pod, command string) (string, error) {
 		execErr bytes.Buffer
 	)

-	args := fmt.Sprintf("kubectl exec --namespace %v %v -- %v", pod.Namespace, pod.Name, command)
-	cmd := exec.Command("/bin/bash", "-c", args)
+	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v exec --namespace %s %s -- %s", framework.KubectlPath, pod.Namespace, pod.Name, command))
 	cmd.Stdout = &execOut
 	cmd.Stderr = &execErr

@@ -195,7 +194,7 @@ func execInfluxDBCommand(pod *corev1.Pod, command string) (string, error) {
 	}

 	if err != nil {
-		return "", fmt.Errorf("could not execute: %v", err)
+		return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err)
 	}

 	return execOut.String(), nil
@@ -17,13 +17,14 @@ limitations under the License.
 package e2e

 import (
+	"os"
 	"testing"

-	"github.com/golang/glog"
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/ginkgo/config"
 	"github.com/onsi/gomega"
 	"k8s.io/apiserver/pkg/util/logs"

 	// required
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
|
@ -36,6 +37,7 @@ import (
|
||||||
_ "k8s.io/ingress-nginx/test/e2e/servicebackend"
|
_ "k8s.io/ingress-nginx/test/e2e/servicebackend"
|
||||||
_ "k8s.io/ingress-nginx/test/e2e/settings"
|
_ "k8s.io/ingress-nginx/test/e2e/settings"
|
||||||
_ "k8s.io/ingress-nginx/test/e2e/ssl"
|
_ "k8s.io/ingress-nginx/test/e2e/ssl"
|
||||||
|
_ "k8s.io/ingress-nginx/test/e2e/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunE2ETests checks configuration parameters (specified through flags) and then runs
|
// RunE2ETests checks configuration parameters (specified through flags) and then runs
|
||||||
|
@@ -50,7 +52,12 @@ func RunE2ETests(t *testing.T) {
 		config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
 	}

-	glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
+	if os.Getenv("KUBECTL_PATH") != "" {
+		framework.KubectlPath = os.Getenv("KUBECTL_PATH")
+		framework.Logf("Using kubectl path '%s'", framework.KubectlPath)
+	}
+
+	framework.Logf("Starting e2e run %q on Ginkgo node %d", framework.RunID, config.GinkgoConfig.ParallelNode)
 	ginkgo.RunSpecs(t, "nginx-ingress-controller e2e suite")
 }

@@ -19,7 +19,11 @@ package framework
 import (
 	"bytes"
 	"fmt"
+	"io"
 	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"

 	"k8s.io/api/core/v1"
 )
@@ -31,14 +35,14 @@ func (f *Framework) ExecCommand(pod *v1.Pod, command string) (string, error) {
 		execErr bytes.Buffer
 	)

-	args := fmt.Sprintf("kubectl exec --namespace %v %v --container nginx-ingress-controller -- %v", pod.Namespace, pod.Name, command)
-	cmd := exec.Command("/bin/bash", "-c", args)
+	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v exec --namespace %s %s --container nginx-ingress-controller -- %s", KubectlPath, pod.Namespace, pod.Name, command))
 	cmd.Stdout = &execOut
 	cmd.Stderr = &execErr

 	err := cmd.Run()
 	if err != nil {
-		return "", fmt.Errorf("could not execute: %v", err)
+		return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err)
+
 	}

 	if execErr.Len() > 0 {
@@ -59,3 +63,51 @@ func (f *Framework) NewIngressController(namespace string) error {

 	return nil
 }
+
+var (
+	proxyRegexp = regexp.MustCompile("Starting to serve on .*:([0-9]+)")
+)
+
+// KubectlProxy creates a proxy to kubernetes apiserver
+func (f *Framework) KubectlProxy(port int) (int, *exec.Cmd, error) {
+	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s proxy --accept-hosts=.* --address=0.0.0.0 --port=%d", KubectlPath, port))
+	stdout, stderr, err := startCmdAndStreamOutput(cmd)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	defer stdout.Close()
+	defer stderr.Close()
+
+	buf := make([]byte, 128)
+	var n int
+	if n, err = stdout.Read(buf); err != nil {
+		return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
+	}
+
+	output := string(buf[:n])
+	match := proxyRegexp.FindStringSubmatch(output)
+	if len(match) == 2 {
+		if port, err := strconv.Atoi(match[1]); err == nil {
+			return port, cmd, nil
+		}
+	}
+
+	return -1, cmd, fmt.Errorf("Failed to parse port from proxy stdout: %s", output)
+}
+
+func startCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
+	stdout, err = cmd.StdoutPipe()
+	if err != nil {
+		return
+	}
+
+	stderr, err = cmd.StderrPipe()
+	if err != nil {
+		return
+	}
+
+	Logf("Asynchronously running '%s'", strings.Join(cmd.Args, " "))
+	err = cmd.Start()
+	return
+}
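A hedged usage sketch of the new KubectlProxy helper, mirroring how the status e2e test added by this commit drives it; proxyAndKill is a hypothetical wrapper, not part of the change:

package example

import (
	"k8s.io/ingress-nginx/test/e2e/framework"
)

// proxyAndKill starts a kubectl proxy on a free port (port 0), leaves room to
// exercise a controller through it, and then kills the process, which is how
// the test below simulates losing the apiserver connection.
func proxyAndKill(f *framework.Framework) error {
	port, cmd, err := f.KubectlProxy(0)
	if err != nil {
		return err
	}

	// ... point a controller at http://<host-ip>:<port> and exercise it ...
	_ = port

	return cmd.Process.Kill()
}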
@@ -46,6 +46,11 @@ const (
 	HTTPS RequestScheme = "https"
 )

+var (
+	// KubectlPath defines the full path of the kubectl binary
+	KubectlPath = "/usr/local/bin/kubectl"
+)
+
 // Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
 type Framework struct {
 	BaseName string
@@ -197,9 +202,8 @@ func (f *Framework) WaitForNginxConfiguration(matcher func(cfg string) bool) err
 	return wait.Poll(Poll, time.Minute*5, f.matchNginxConditions("", matcher))
 }

-// NginxLogs returns the logs of the nginx ingress controller pod running
-func (f *Framework) NginxLogs() (string, error) {
-	l, err := f.KubeClientSet.CoreV1().Pods(f.IngressController.Namespace).List(metav1.ListOptions{
+func nginxLogs(client kubernetes.Interface, namespace string) (string, error) {
+	l, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{
 		LabelSelector: "app.kubernetes.io/name=ingress-nginx",
 	})
 	if err != nil {
@@ -209,7 +213,7 @@ func (f *Framework) NginxLogs() (string, error) {
 	for _, pod := range l.Items {
 		if strings.HasPrefix(pod.GetName(), "nginx-ingress-controller") {
 			if isRunning, err := podRunningReady(&pod); err == nil && isRunning {
-				return f.Logs(&pod)
+				return Logs(&pod)
 			}
 		}
 	}
@@ -217,6 +221,11 @@ func (f *Framework) NginxLogs() (string, error) {
 	return "", fmt.Errorf("no nginx ingress controller pod is running (logs)")
 }

+// NginxLogs returns the logs of the nginx ingress controller pod running
+func (f *Framework) NginxLogs() (string, error) {
+	return nginxLogs(f.KubeClientSet, f.IngressController.Namespace)
+}
+
 func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) bool) wait.ConditionFunc {
 	return func() (bool, error) {
 		l, err := f.KubeClientSet.CoreV1().Pods(f.IngressController.Namespace).List(metav1.ListOptions{
@@ -380,7 +389,7 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
 		LabelSelector: fields.SelectorFromSet(fields.Set(deployment.Spec.Template.ObjectMeta.Labels)).String(),
 	})
 	if err != nil {
-		return errors.Wrapf(err, "failed to wait for nginx-ingress-controller replica count to be %v", replicas)
+		return errors.Wrapf(err, "waiting for nginx-ingress-controller replica count to be %v", replicas)
 	}

 	return nil
@@ -25,7 +25,7 @@ import (
 )

 // Logs returns the log entries of a given Pod.
-func (f *Framework) Logs(pod *v1.Pod) (string, error) {
+func Logs(pod *v1.Pod) (string, error) {
 	var (
 		execOut bytes.Buffer
 		execErr bytes.Buffer
@@ -35,14 +35,13 @@ func (f *Framework) Logs(pod *v1.Pod) (string, error) {
 		return "", fmt.Errorf("could not determine which container to use")
 	}

-	args := fmt.Sprintf("kubectl logs -n %v %v", pod.Namespace, pod.Name)
-	cmd := exec.Command("/bin/bash", "-c", args)
+	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%v logs --namespace %s %s", KubectlPath, pod.Namespace, pod.Name))
 	cmd.Stdout = &execOut
 	cmd.Stderr = &execErr

 	err := cmd.Run()
 	if err != nil {
-		return "", fmt.Errorf("could not execute: %v", err)
+		return "", fmt.Errorf("could not execute '%s %s': %v", cmd.Path, cmd.Args, err)
 	}

 	if execErr.Len() > 0 {
test/e2e/status/update.go (new file, 145 lines)
@@ -0,0 +1,145 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package settings
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	apiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	"k8s.io/ingress-nginx/test/e2e/framework"
+)
+
+var _ = framework.IngressNginxDescribe("Status Update [Status]", func() {
+	f := framework.NewDefaultFramework("status-update")
+	host := "status-update"
+	address := getHostIP()
+
+	BeforeEach(func() {
+	})
+
+	AfterEach(func() {
+	})
+
+	It("should update status field after client-go reconnection", func() {
+		port, cmd, err := f.KubectlProxy(0)
+		Expect(err).NotTo(HaveOccurred(), "starting kubectl proxy")
+
+		err = framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1,
+			func(deployment *appsv1beta1.Deployment) error {
+				args := deployment.Spec.Template.Spec.Containers[0].Args
+				args = append(args, fmt.Sprintf("--apiserver-host=http://%s:%d", address.String(), port))
+				args = append(args, "--publish-status-address=1.1.0.0")
+				// flags --publish-service and --publish-status-address are mutually exclusive
+				var index int
+				for k, v := range args {
+					if strings.Index(v, "--publish-service") != -1 {
+						index = k
+						break
+					}
+				}
+				if index > -1 {
+					args[index] = ""
+				}
+
+				deployment.Spec.Template.Spec.Containers[0].Args = args
+				_, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment)
+				return err
+			})
+		Expect(err).NotTo(HaveOccurred(), "updating ingress controller deployment flags")
+
+		err = f.NewEchoDeploymentWithReplicas(1)
+		Expect(err).NotTo(HaveOccurred(), "waiting one replicaset in echoserver deployment")
+
+		ing, err := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil))
+		Expect(err).NotTo(HaveOccurred(), "waiting Ingress creation for hostname %v", host)
+		Expect(ing).NotTo(BeNil())
+
+		err = f.WaitForNginxConfiguration(
+			func(cfg string) bool {
+				return strings.Contains(cfg, fmt.Sprintf("server_name %s", host))
+			})
+		Expect(err).NotTo(HaveOccurred(), "waiting for nginx server section with server_name %v", host)
+
+		framework.Logf("waiting for leader election and initial status update")
+		time.Sleep(30 * time.Second)
+
+		err = cmd.Process.Kill()
+		Expect(err).NotTo(HaveOccurred(), "terminating kubectl proxy")
+
+		ing, err = f.KubeClientSet.Extensions().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred(), "getting %s/%v Ingress", f.IngressController.Namespace, host)
+
+		ing.Status.LoadBalancer.Ingress = []apiv1.LoadBalancerIngress{}
+		_, err = f.KubeClientSet.Extensions().Ingresses(f.IngressController.Namespace).UpdateStatus(ing)
+		Expect(err).NotTo(HaveOccurred(), "cleaning Ingress status")
+		time.Sleep(10 * time.Second)
+
+		err = f.KubeClientSet.CoreV1().
+			ConfigMaps(f.IngressController.Namespace).
+			Delete("ingress-controller-leader-nginx", &metav1.DeleteOptions{})
+		Expect(err).NotTo(HaveOccurred(), "deleting leader election configmap")
+
+		_, cmd, err = f.KubectlProxy(port)
+		Expect(err).NotTo(HaveOccurred(), "starting kubectl proxy")
+		defer func() {
+			if cmd != nil {
+				err := cmd.Process.Kill()
+				Expect(err).NotTo(HaveOccurred(), "terminating kubectl proxy")
+			}
+		}()
+
+		err = wait.Poll(10*time.Second, time.Minute*3, func() (done bool, err error) {
+			ing, err = f.KubeClientSet.Extensions().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{})
+			if err != nil {
+				return false, err
+			}
+
+			if len(ing.Status.LoadBalancer.Ingress) != 1 {
+				return false, nil
+			}
+
+			return true, nil
+		})
+		Expect(err).NotTo(HaveOccurred(), "waiting for ingress status")
+		Expect(ing.Status.LoadBalancer.Ingress).Should(Equal([]apiv1.LoadBalancerIngress{
+			{IP: "1.1.0.0"},
+		}))
+	})
+})
+
+func getHostIP() net.IP {
+	conn, err := net.Dial("udp", "8.8.8.8:80")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	localAddr := conn.LocalAddr().(*net.UDPAddr)
+
+	return localAddr.IP
+}