Skip to content

Commit

Permalink
fix deprovisioning of namespaces
Browse files Browse the repository at this point in the history
Signed-off-by: hzrncik <hzrncik@redhat.com>
  • Loading branch information
henryZrncik committed Aug 29, 2024
1 parent 2a7c836 commit 5167d99
Show file tree
Hide file tree
Showing 9 changed files with 175 additions and 112 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,16 @@ public static void waitForInstantConsumerClientSuccess(TestStorage testStorage)
waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), testStorage.getMessageCount());
}

/**
* Waits for the instant producer client to succeed in the explicitly specified namespace,
* automatically deleting the associated job afterward.
*
* @param namespaceName Explicit namespace in which the producer job runs.
* @param testStorage The {@link TestStorage} instance providing the producer name and message count.
*/
public static void waitForInstantProducerClientSuccess(String namespaceName, TestStorage testStorage) {
waitForClientSuccess(testStorage.getProducerName(), namespaceName, testStorage.getMessageCount());
}

/**
* Waits for the instant producer client to succeed, automatically deleting the associated job afterward.
* {@link TestStorage#getProducerName()} is used for identifying producer Job and
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
import io.strimzi.api.kafka.model.kafka.KafkaResources;
import io.strimzi.api.kafka.model.nodepool.ProcessRoles;
import io.strimzi.operator.common.Annotations;
import io.strimzi.systemtest.resources.NamespaceManager;
import io.strimzi.systemtest.resources.crd.KafkaNodePoolResource;
import io.strimzi.systemtest.templates.crd.KafkaNodePoolTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
Expand All @@ -24,11 +25,15 @@
import io.strimzi.test.TestUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;

import java.io.File;
import java.io.IOException;
import java.util.Map;

import static io.strimzi.systemtest.Environment.TEST_SUITE_NAMESPACE;
import static io.strimzi.systemtest.TestConstants.CO_NAMESPACE;
import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
import static org.junit.jupiter.api.Assertions.fail;
Expand Down Expand Up @@ -216,4 +221,18 @@ protected void applyCustomResourcesFromPath(String namespaceName, String example
LOGGER.info("Deploying KafkaTopic from: {}, in Namespace {}", kafkaTopicYaml.getPath(), namespaceName);
cmdKubeClient(namespaceName).applyContent(TestUtils.readFile(kafkaTopicYaml));
}

@BeforeEach
void setupEnvironment() {
    // Prepare the Cluster Operator namespace and the test-suite namespace before each test.
    NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE);

    // Guard against CO_NAMESPACE and TEST_SUITE_NAMESPACE referring to the same namespace
    // (single-namespace configurations): preparing it twice is redundant. This also keeps
    // the suite consistent with the sibling upgrade/downgrade suites, which carry this guard.
    if (!CO_NAMESPACE.equals(TEST_SUITE_NAMESPACE)) {
        NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE);
    }
}

@AfterEach
void afterEach() {
    // Tear down in dependency order: topics first, then installed operator YAMLs,
    // then the namespaces themselves.
    cleanUpKafkaTopics(TEST_SUITE_NAMESPACE);
    deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir);
    NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE);

    // When CO_NAMESPACE and TEST_SUITE_NAMESPACE are the same namespace, it was already
    // deleted above — waiting on it again would block on a namespace that is gone.
    // Mirrors the guard used by the sibling upgrade/downgrade suites.
    if (!CO_NAMESPACE.equals(TEST_SUITE_NAMESPACE)) {
        NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE);
    }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ void testDowngradeOfKafkaConnectAndKafkaConnector() throws IOException {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());
UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(bundleDowngradeVersionData.getDeployKafkaVersion());

doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, TEST_SUITE_NAMESPACE, bundleDowngradeVersionData, testStorage, upgradeKafkaVersion);
doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, bundleDowngradeVersionData, testStorage, upgradeKafkaVersion);
}

private void performDowngrade(String clusterOperatorNamespaceName, String componentsNamespaceName, BundleVersionModificationData downgradeData) throws IOException {
Expand All @@ -70,7 +70,7 @@ private void performDowngrade(String clusterOperatorNamespaceName, String compon
// We support downgrade only when you didn't upgrade to new inter.broker.protocol.version and log.message.format.version
// https://strimzi.io/docs/operators/latest/full/deploying.html#con-target-downgrade-version-str

setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, componentsNamespaceName, downgradeData, testStorage, testUpgradeKafkaVersion);
setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, downgradeData, testStorage, testUpgradeKafkaVersion);
logClusterOperatorPodImage(clusterOperatorNamespaceName);

boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(componentsNamespaceName, eoSelector);
Expand All @@ -97,12 +97,20 @@ private void performDowngrade(String clusterOperatorNamespaceName, String compon
// Prepares the namespaces required by each test: the Cluster Operator namespace and,
// when distinct, the shared test-suite namespace.
@BeforeEach
void setupEnvironment() {
NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE);

// Skip the second create when both constants resolve to the same namespace
// (single-namespace configurations) — it was already prepared above.
if (!CO_NAMESPACE.equals(TEST_SUITE_NAMESPACE)) {
NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE);
}
}

@AfterEach
void afterEach() {
cleanUpKafkaTopics();
cleanUpKafkaTopics(TEST_SUITE_NAMESPACE);
deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir);
NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE);

if (!CO_NAMESPACE.equals(TEST_SUITE_NAMESPACE)) {
NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;
Expand Down Expand Up @@ -68,38 +69,38 @@ void testUpgradeKafkaWithoutVersion() throws IOException {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());

// Setup env
setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage, upgradeKafkaVersion);
setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, acrossUpgradeData, testStorage, upgradeKafkaVersion);

Map<String, String> controllerSnapshot = PodUtils.podSnapshot(TEST_SUITE_NAMESPACE, controllerSelector);
Map<String, String> brokerSnapshot = PodUtils.podSnapshot(TEST_SUITE_NAMESPACE, brokerSelector);
Map<String, String> eoSnapshot = PodUtils.podSnapshot(TEST_SUITE_NAMESPACE, eoSelector);
Map<String, String> controllerSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), controllerSelector);
Map<String, String> brokerSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), brokerSelector);
Map<String, String> eoSnapshot = PodUtils.podSnapshot(testStorage.getNamespaceName(), eoSelector);

// Make snapshots of all Pods
makeComponentsSnapshots(TEST_SUITE_NAMESPACE);
makeComponentsSnapshots(testStorage.getNamespaceName());

// Check if UTO is used before changing the CO -> used for check for KafkaTopics
boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TEST_SUITE_NAMESPACE, eoSelector);
boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector);

// Upgrade CO
changeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData);
changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData);
logClusterOperatorPodImage(CO_NAMESPACE);
logComponentsPodImages(TEST_SUITE_NAMESPACE);
logComponentsPodImages(testStorage.getNamespaceName());

RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TEST_SUITE_NAMESPACE, controllerSelector, 3, controllerSnapshot);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TEST_SUITE_NAMESPACE, brokerSelector, 3, brokerSnapshot);
DeploymentUtils.waitTillDepHasRolled(TEST_SUITE_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoSnapshot);
checkAllComponentsImages(TEST_SUITE_NAMESPACE, acrossUpgradeData);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, 3, controllerSnapshot);
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, 3, brokerSnapshot);
DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoSnapshot);
checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData);

// Verify that Pods are stable
PodUtils.verifyThatRunningPodsAreStable(TEST_SUITE_NAMESPACE, clusterName);
PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);
// Verify upgrade
verifyProcedure(TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);

String controllerPodName = kubeClient().listPodsByPrefixInName(TEST_SUITE_NAMESPACE, KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName();
String brokerPodName = kubeClient().listPodsByPrefixInName(TEST_SUITE_NAMESPACE, KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName();
String controllerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, CONTROLLER_NODE_NAME)).get(0).getMetadata().getName();
String brokerPodName = kubeClient().listPodsByPrefixInName(testStorage.getNamespaceName(), KafkaResource.getStrimziPodSetName(clusterName, BROKER_NODE_NAME)).get(0).getMetadata().getName();

assertThat(KafkaUtils.getVersionFromKafkaPodLibs(TEST_SUITE_NAMESPACE, controllerPodName), containsString(acrossUpgradeData.getProcedures().getVersion()));
assertThat(KafkaUtils.getVersionFromKafkaPodLibs(TEST_SUITE_NAMESPACE, brokerPodName), containsString(acrossUpgradeData.getProcedures().getVersion()));
assertThat(KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), controllerPodName), containsString(acrossUpgradeData.getProcedures().getVersion()));
assertThat(KafkaUtils.getVersionFromKafkaPodLibs(testStorage.getNamespaceName(), brokerPodName), containsString(acrossUpgradeData.getProcedures().getVersion()));
}

@IsolatedTest
Expand All @@ -108,66 +109,66 @@ void testUpgradeAcrossVersionsWithUnsupportedKafkaVersion() throws IOException {
UpgradeKafkaVersion upgradeKafkaVersion = UpgradeKafkaVersion.getKafkaWithVersionFromUrl(acrossUpgradeData.getFromKafkaVersionsUrl(), acrossUpgradeData.getStartingKafkaVersion());

// Setup env
setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage, upgradeKafkaVersion);
setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, acrossUpgradeData, testStorage, upgradeKafkaVersion);

// Make snapshots of all Pods
makeComponentsSnapshots(TEST_SUITE_NAMESPACE);
makeComponentsSnapshots(testStorage.getNamespaceName());

// Check if UTO is used before changing the CO -> used for check for KafkaTopics
boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TEST_SUITE_NAMESPACE, eoSelector);
boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector);

// Upgrade CO
changeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData);
changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData);

waitForKafkaClusterRollingUpdate(TEST_SUITE_NAMESPACE);
waitForKafkaClusterRollingUpdate(testStorage.getNamespaceName());

logPodImages(CO_NAMESPACE);

// Upgrade kafka
changeKafkaAndMetadataVersion(TEST_SUITE_NAMESPACE, acrossUpgradeData, true);
changeKafkaAndMetadataVersion(testStorage.getNamespaceName(), acrossUpgradeData, true);

logPodImages(CO_NAMESPACE);

checkAllComponentsImages(TEST_SUITE_NAMESPACE, acrossUpgradeData);
checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData);

// Verify that Pods are stable
PodUtils.verifyThatRunningPodsAreStable(TEST_SUITE_NAMESPACE, clusterName);
PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);

// Verify upgrade
verifyProcedure(TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
}

@IsolatedTest
void testUpgradeAcrossVersionsWithNoKafkaVersion() throws IOException {
final TestStorage testStorage = new TestStorage(ResourceManager.getTestContext());

// Setup env
setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage, null);
setupEnvAndUpgradeClusterOperator(CO_NAMESPACE, acrossUpgradeData, testStorage, null);

// Check if UTO is used before changing the CO -> used for check for KafkaTopics
boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(TEST_SUITE_NAMESPACE, eoSelector);
boolean wasUTOUsedBefore = StUtils.isUnidirectionalTopicOperatorUsed(testStorage.getNamespaceName(), eoSelector);

// Upgrade CO
changeClusterOperator(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData);
changeClusterOperator(CO_NAMESPACE, testStorage.getNamespaceName(), acrossUpgradeData);

// Wait till first upgrade finished
controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TEST_SUITE_NAMESPACE, controllerSelector, 3, controllerPods);
brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(TEST_SUITE_NAMESPACE, brokerSelector, 3, brokerPods);
eoPods = DeploymentUtils.waitTillDepHasRolled(TEST_SUITE_NAMESPACE, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);
controllerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controllerSelector, 3, controllerPods);
brokerPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSelector, 3, brokerPods);
eoPods = DeploymentUtils.waitTillDepHasRolled(testStorage.getNamespaceName(), KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPods);

LOGGER.info("Rolling to new images has finished!");
logPodImages(CO_NAMESPACE);

// Upgrade kafka
changeKafkaAndMetadataVersion(TEST_SUITE_NAMESPACE, acrossUpgradeData);
logComponentsPodImages(TEST_SUITE_NAMESPACE);
checkAllComponentsImages(TEST_SUITE_NAMESPACE, acrossUpgradeData);
changeKafkaAndMetadataVersion(testStorage.getNamespaceName(), acrossUpgradeData);
logComponentsPodImages(testStorage.getNamespaceName());
checkAllComponentsImages(testStorage.getNamespaceName(), acrossUpgradeData);

// Verify that Pods are stable
PodUtils.verifyThatRunningPodsAreStable(TEST_SUITE_NAMESPACE, clusterName);
PodUtils.verifyThatRunningPodsAreStable(testStorage.getNamespaceName(), clusterName);

// Verify upgrade
verifyProcedure(TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
verifyProcedure(testStorage.getNamespaceName(), acrossUpgradeData, testStorage.getContinuousProducerName(), testStorage.getContinuousConsumerName(), wasUTOUsedBefore);
}

@MicroShiftNotSupported("Due to lack of Kafka Connect build feature")
Expand All @@ -177,7 +178,7 @@ void testUpgradeOfKafkaConnectAndKafkaConnector(final ExtensionContext extension
final TestStorage testStorage = new TestStorage(extensionContext);
final UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion(acrossUpgradeData.getDefaultKafka());

doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, TEST_SUITE_NAMESPACE, acrossUpgradeData, testStorage, upgradeKafkaVersion);
doKafkaConnectAndKafkaConnectorUpgradeOrDowngradeProcedure(CO_NAMESPACE, acrossUpgradeData, testStorage, upgradeKafkaVersion);
}

private void performUpgrade(String clusterOperatorNamespaceName, String componentsNamespaceName, BundleVersionModificationData upgradeData) throws IOException {
Expand All @@ -187,7 +188,7 @@ private void performUpgrade(String clusterOperatorNamespaceName, String componen
UpgradeKafkaVersion upgradeKafkaVersion = new UpgradeKafkaVersion();

// Setup env
setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, componentsNamespaceName, upgradeData, testStorage, upgradeKafkaVersion);
setupEnvAndUpgradeClusterOperator(clusterOperatorNamespaceName, upgradeData, testStorage, upgradeKafkaVersion);

// Upgrade CO to HEAD
logClusterOperatorPodImage(clusterOperatorNamespaceName);
Expand Down Expand Up @@ -220,5 +221,20 @@ private void performUpgrade(String clusterOperatorNamespaceName, String componen
// Prepares the namespaces required by each test: the Cluster Operator namespace and,
// when distinct, the shared test-suite namespace.
@BeforeEach
void setupEnvironment() {
NamespaceManager.getInstance().createNamespaceAndPrepare(CO_NAMESPACE);

// Skip the second create when both constants resolve to the same namespace
// (single-namespace configurations) — it was already prepared above.
if (!CO_NAMESPACE.equals(TEST_SUITE_NAMESPACE)) {
NamespaceManager.getInstance().createNamespaceAndPrepare(TEST_SUITE_NAMESPACE);
}
}

// Tears the test environment down in dependency order: Kafka topics, then the installed
// operator YAMLs, then the namespaces themselves.
@AfterEach
void afterEach() {
cleanUpKafkaTopics(TEST_SUITE_NAMESPACE);
deleteInstalledYamls(CO_NAMESPACE, TEST_SUITE_NAMESPACE, coDir);
NamespaceManager.getInstance().deleteNamespaceWithWait(CO_NAMESPACE);

// When both constants point at the same namespace, it was already deleted above —
// waiting on it again would block on a namespace that no longer exists.
if (!CO_NAMESPACE.equals(TEST_SUITE_NAMESPACE)) {
NamespaceManager.getInstance().deleteNamespaceWithWait(TEST_SUITE_NAMESPACE);
}
}
}
Loading

0 comments on commit 5167d99

Please sign in to comment.