
Commit e6abb94

Merge pull request #29669 from jcaamano/vrf-lite-test
CORENET-5854: networking: add VRF-Lite test cases
2 parents: 3bfea73 + 9480cc5

6 files changed (+622, -325 lines)

test/extended/networking/egressip.go

Lines changed: 5 additions & 4 deletions
@@ -685,6 +685,11 @@ func spawnProberSendEgressIPTrafficCheckLogs(
 
 	framework.Logf("Launching a new prober pod")
 	proberPod := createProberPod(oc, externalNamespace, probePodName)
+	defer func() {
+		framework.Logf("Destroying the prober pod")
+		err := destroyProberPod(oc, proberPod)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}()
 
 	// Unfortunately, even after we created the EgressIP object and the CloudPrivateIPConfig, it can take some time before everything is applied correctly.
 	// Retry this test every 30 seconds for up to 2 minutes to give the cluster time to converge - eventually, this test should pass.
@@ -693,10 +698,6 @@ func spawnProberSendEgressIPTrafficCheckLogs(
 		result, err := sendEgressIPProbesAndCheckPacketSnifferLogs(oc, proberPod, routeName, targetProtocol, targetHost, targetPort, iterations, expectedHits, packetSnifferDaemonSet, egressIPSet, 10)
 		return err == nil && result
 	}, 120*time.Second, 30*time.Second).Should(o.BeTrue())
-
-	framework.Logf("Destroying the prober pod")
-	err := destroyProberPod(oc, proberPod)
-	o.Expect(err).NotTo(o.HaveOccurred())
 }
 
 // ovnKubernetesCreateEgressIPObject creates the file containing the EgressIP YAML definition which can
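The refactor above registers prober-pod teardown with defer immediately after the pod is created, instead of at the end of the function, so cleanup also runs when the Eventually assertion fails (a failed Gomega assertion unwinds as a panic under Ginkgo, which a defer still covers while a trailing statement would not). A minimal, self-contained sketch of the pattern; all names are illustrative stand-ins, not the suite's real helpers:

```go
package main

import "fmt"

// Illustrative stand-ins for the suite's createProberPod/destroyProberPod.
func createProber() string { return "prober-pod" }

func destroyProber(name string) error {
	fmt.Println("destroying", name)
	return nil
}

func runCheck() {
	pod := createProber()
	// Register teardown immediately: it runs on normal return *and* while
	// unwinding a panic, which is how a failed assertion propagates.
	defer func() {
		if err := destroyProber(pod); err != nil {
			fmt.Println("cleanup failed:", err)
		}
	}()

	// Simulate a failing assertion after the pod exists.
	panic("assertion failed: expected probe hits not found")
}

func main() {
	defer func() { _ = recover() }() // keep the demo process alive
	runCheck()                       // still prints "destroying prober-pod"
}
```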

test/extended/networking/egressip_helpers.go

Lines changed: 77 additions & 38 deletions
@@ -630,26 +630,15 @@ func scanPacketSnifferDaemonSetPodLogs(oc *exutil.CLI, ds *appsv1.DaemonSet, tar
 
 	matchedIPs := make(map[string]int)
 	for _, pod := range pods.Items {
-		logOptions := corev1.PodLogOptions{}
-		req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &logOptions)
-		logs, err := req.Stream(context.TODO())
+		buf, err := getLogsAsBuffer(clientset, &pod)
 		if err != nil {
-			return nil, fmt.Errorf("Error in opening log stream: %v", err)
-		}
-		defer logs.Close()
-
-		buf := new(bytes.Buffer)
-		_, err = io.Copy(buf, logs)
-		if err != nil {
-			return nil, fmt.Errorf("Error in copying info from pod logs to buffer")
+			return nil, err
 		}
-		_ = buf.String()
 
 		var ip string
 		scanner := bufio.NewScanner(buf)
 		for scanner.Scan() {
 			logLine := scanner.Text()
-
 			if !strings.HasPrefix(logLine, "Parsed") || !strings.Contains(logLine, searchString) {
 				continue
 			}
@@ -1548,6 +1537,51 @@ func createHostNetworkedDaemonSetAndProbe(clientset kubernetes.Interface, namesp
 	return ds, fmt.Errorf("Daemonset still not ready after %d tries", retries)
 }
 
+func getLogsAsBuffer(clientset kubernetes.Interface, pod *v1.Pod) (*bytes.Buffer, error) {
+	logOptions := corev1.PodLogOptions{}
+	req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &logOptions)
+	logs, err := req.Stream(context.TODO())
+	if err != nil {
+		return nil, fmt.Errorf("Error in opening log stream")
+	}
+	defer logs.Close()
+
+	buf := new(bytes.Buffer)
+	_, err = io.Copy(buf, logs)
+	if err != nil {
+		return nil, fmt.Errorf("Error in copying info from pod logs to buffer")
+	}
+	_ = buf.String()
+	return buf, nil
+}
+
+func getLogs(clientset kubernetes.Interface, pod *v1.Pod) (string, error) {
+	b, err := getLogsAsBuffer(clientset, pod)
+	if err != nil {
+		return "", err
+	}
+	return b.String(), nil
+}
+
+func getDaemonSetLogs(clientset kubernetes.Interface, ds *appsv1.DaemonSet) (map[string]string, error) {
+	pods, err := clientset.CoreV1().Pods(ds.Namespace).List(
+		context.TODO(),
+		metav1.ListOptions{LabelSelector: labels.Set(ds.Spec.Selector.MatchLabels).String()})
+	if err != nil {
+		return nil, err
+	}
+
+	logs := make(map[string]string, len(pods.Items))
+	for _, pod := range pods.Items {
+		log, err := getLogs(clientset, &pod)
+		if err != nil {
+			return nil, err
+		}
+		logs[pod.Spec.NodeName] = log
+	}
+	return logs, nil
+}
+
 // podHasPortConflict scans the pod for a port conflict message and also scans the
 // pod's logs for error messages that might indicate such a conflict.
 func podHasPortConflict(clientset kubernetes.Interface, pod v1.Pod) (bool, error) {
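The three helpers added above consolidate the open-stream/copy-to-buffer boilerplate that the old code repeated at each call site; getDaemonSetLogs additionally keys each pod's log by the node it runs on. A hedged sketch of a possible caller, assuming it lives in the same package as the helpers (the function name and package clause are invented):

```go
package networking // sketch: same package as the helpers above

import (
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// nodesReportingTraffic (invented name) returns the nodes whose daemonset
// pod logged the given search string, using the commit's getDaemonSetLogs,
// whose result map is keyed by node name.
func nodesReportingTraffic(clientset kubernetes.Interface, ds *appsv1.DaemonSet, needle string) ([]string, error) {
	logsByNode, err := getDaemonSetLogs(clientset, ds)
	if err != nil {
		return nil, err
	}
	var nodes []string
	for node, podLog := range logsByNode {
		if strings.Contains(podLog, needle) {
			nodes = append(nodes, node)
		}
	}
	return nodes, nil
}
```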
@@ -1561,20 +1595,10 @@ func podHasPortConflict(clientset kubernetes.Interface, pod v1.Pod) (bool, error
 
 		}
 	} else if pod.Status.Phase == v1.PodRunning {
-		logOptions := corev1.PodLogOptions{}
-		req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &logOptions)
-		logs, err := req.Stream(context.TODO())
+		logStr, err := getLogs(clientset, &pod)
 		if err != nil {
-			return false, fmt.Errorf("Error in opening log stream")
-		}
-		defer logs.Close()
-
-		buf := new(bytes.Buffer)
-		_, err = io.Copy(buf, logs)
-		if err != nil {
-			return false, fmt.Errorf("Error in copying info from pod logs to buffer")
+			return false, err
 		}
-		logStr := buf.String()
 		if strings.Contains(logStr, "address already in use") {
 			return true, nil
 		}
@@ -1609,22 +1633,42 @@ func getDaemonSetPodIPs(clientset kubernetes.Interface, namespace, daemonsetName
 // for the specified number of iterations and returns a set of the clientIP addresses that were returned.
 // At the end of the test, the prober pod is deleted again.
 func probeForClientIPs(oc *exutil.CLI, proberPodNamespace, proberPodName, url, targetIP string, targetPort, iterations int) (map[string]struct{}, error) {
+	responseSet, err := probeForRequest(oc, proberPodNamespace, proberPodName, url, targetIP, "clientip", targetPort, iterations, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	clientIpSet := make(map[string]struct{}, len(responseSet))
+	for response := range responseSet {
+		clientIpPort := strings.Split(response, ":")
+		if len(clientIpPort) != 2 {
+			continue
+		}
+		clientIp := clientIpPort[0]
+		clientIpSet[clientIp] = struct{}{}
+	}
+
+	return clientIpSet, nil
+}
+
+// probeForRequest spawns a prober pod inside the prober namespace. It then runs curl against http://%s/dial?host=%s&port=%d&request=%s
+// for the specified number of iterations and returns a set of the responses that were returned.
+// At the end of the test, the prober pod is deleted again.
+func probeForRequest(oc *exutil.CLI, proberPodNamespace, proberPodName, url, targetIP, request string, targetPort, iterations int, tweak func(*v1.Pod)) (map[string]struct{}, error) {
 	if oc == nil {
 		return nil, fmt.Errorf("Nil pointer to exutil.CLI oc was provided in SendProbesToHostPort.")
 	}
 
 	f := oc.KubeFramework()
 	clientset := f.ClientSet
 
-	clientIpSet := make(map[string]struct{})
+	responseSet := make(map[string]struct{})
 
-	proberPod := frameworkpod.CreateExecPodOrFail(context.TODO(), clientset, proberPodNamespace, probePodName, func(pod *corev1.Pod) {
-		// pod.ObjectMeta.Annotations = annotation
-	})
-	request := fmt.Sprintf("http://%s/dial?host=%s&port=%d&request=/clientip", url, targetIP, targetPort)
+	proberPod := frameworkpod.CreateExecPodOrFail(context.TODO(), clientset, proberPodNamespace, probePodName, tweak)
+	request = fmt.Sprintf("http://%s/dial?host=%s&port=%d&request=/%s", url, targetIP, targetPort, request)
 	maxTimeouts := 3
 	for i := 0; i < iterations; i++ {
-		output, err := runOcWithRetry(oc.AsAdmin(), "exec", "--", "curl", "-s", request)
+		output, err := runOcWithRetry(oc.AsAdmin(), "exec", "-n", proberPod.Namespace, proberPod.Name, "--", "curl", "-s", request)
 		if err != nil {
 			// if we hit an i/o timeout, retry
 			if timeoutError, _ := regexp.Match("^Unable to connect to the server: dial tcp.*i/o timeout$", []byte(output)); timeoutError && maxTimeouts > 0 {
@@ -1645,12 +1689,7 @@ func probeForClientIPs(oc *exutil.CLI, proberPodNamespace, proberPodName, url, t
 		if len(dialResponse.Responses) != 1 {
 			continue
 		}
-		clientIpPort := strings.Split(dialResponse.Responses[0], ":")
-		if len(clientIpPort) != 2 {
-			continue
-		}
-		clientIp := clientIpPort[0]
-		clientIpSet[clientIp] = struct{}{}
+		responseSet[dialResponse.Responses[0]] = struct{}{}
 	}
 
 	// delete the exec pod again - in foreground, so that it blocks
@@ -1661,7 +1700,7 @@ func probeForClientIPs(oc *exutil.CLI, proberPodNamespace, proberPodName, url, t
 		return nil, err
 	}
 
-	return clientIpSet, nil
+	return responseSet, nil
 }
 
 // getTargetProtocolHostPort gets targetProtocol, targetHost, targetPort.
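With this change, probeForClientIPs becomes a thin wrapper over the new probeForRequest, which parameterizes the agnhost /dial request path and accepts an optional tweak callback applied to the exec pod before creation; the oc exec invocation also now names the prober pod explicitly (-n <namespace> <pod>). A hedged sketch of how a caller might use the generalized signature; the wrapper function, import paths, and every literal value are invented for illustration:

```go
package networking // sketch: same package as probeForRequest above

import (
	v1 "k8s.io/api/core/v1"

	exutil "github.com/openshift/origin/test/extended/util"
)

// probeHostnames (invented) asks agnhost's /dial endpoint for /hostname
// instead of /clientip and customizes the prober pod via the tweak.
func probeHostnames(oc *exutil.CLI) (map[string]struct{}, error) {
	return probeForRequest(
		oc,
		"e2e-prober",     // prober pod namespace (illustrative)
		"prober",         // prober pod name (illustrative)
		"203.0.113.5:80", // URL the prober pod curls (illustrative)
		"10.0.0.10",      // host handed to /dial (illustrative)
		"hostname",       // request path; probeForClientIPs passes "clientip"
		8080,             // target port
		10,               // iterations
		func(pod *v1.Pod) {
			// tweak runs before the exec pod is created; pinning it to a
			// node is one plausible use (node name invented).
			pod.Spec.NodeName = "worker-0"
		},
	)
}
```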
