diff --git a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go
index 829a1697ef7..f718dfe3c76 100644
--- a/pkg/backup/backup/backup_cleaner.go
+++ b/pkg/backup/backup/backup_cleaner.go
@@ -279,6 +279,11 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
 		},
 	}
 
+	if backup.Spec.AutomountServiceAccountToken != nil && !*backup.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	job := &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: backup.GetCleanJobName(),
@@ -479,6 +484,11 @@ func (bc *backupCleaner) makeStopLogBackupJob(backup *v1alpha1.Backup) (*batchv1
 		},
 	}
 
+	if backup.Spec.AutomountServiceAccountToken != nil && !*backup.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	job := &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: jobName,
diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go
index 77991925e35..417c9535804 100644
--- a/pkg/backup/backup/backup_manager.go
+++ b/pkg/backup/backup/backup_manager.go
@@ -651,6 +651,11 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
 		},
 	}
 
+	if backup.Spec.AutomountServiceAccountToken != nil && !*backup.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	job := &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: backup.GetBackupJobName(),
@@ -880,6 +885,11 @@ func (bm *backupManager) makeBRBackupJob(backup *v1alpha1.Backup) (*batchv1.Job,
 		},
 	}
 
+	if backup.Spec.AutomountServiceAccountToken != nil && !*backup.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	job := &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: jobName,
diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go
index be5c8244cb1..94fc37aa5e2 100644
--- a/pkg/backup/restore/restore_manager.go
+++ b/pkg/backup/restore/restore_manager.go
@@ -858,6 +858,11 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
 		},
 	}
 
+	if restore.Spec.AutomountServiceAccountToken != nil && !*restore.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	job := &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: restore.GetRestoreJobName(),
@@ -1097,6 +1102,11 @@ func (rm *restoreManager) makeRestoreJobWithMode(restore *v1alpha1.Restore, isPr
 		},
 	}
 
+	if restore.Spec.AutomountServiceAccountToken != nil && !*restore.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	// Job name differs between restore and prune jobs
 	jobName := restore.GetRestoreJobName()
 	if isPruneJob {
diff --git a/pkg/controller/compactbackup/compact_backup_controller.go b/pkg/controller/compactbackup/compact_backup_controller.go
index b862a358172..0bbf1bfc77c 100644
--- a/pkg/controller/compactbackup/compact_backup_controller.go
+++ b/pkg/controller/compactbackup/compact_backup_controller.go
@@ -469,6 +469,11 @@ func (c *Controller) makeCompactJob(compact *v1alpha1.CompactBackup) (*batchv1.J
 		},
 	}
 
+	if compact.Spec.AutomountServiceAccountToken != nil && !*compact.Spec.AutomountServiceAccountToken {
+		podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Spec.Containers[0].VolumeMounts = append(podSpec.Spec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	job := &batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: jobName,
diff --git a/pkg/manager/member/tidb_discovery_manager.go b/pkg/manager/member/tidb_discovery_manager.go
index 3076f987fae..6635f13f1ed 100644
--- a/pkg/manager/member/tidb_discovery_manager.go
+++ b/pkg/manager/member/tidb_discovery_manager.go
@@ -311,6 +311,11 @@ func (m *realTidbDiscoveryManager) getTidbDiscoveryDeployment(obj metav1.Object)
 		})
 	}
 
+	if baseSpec.AutomountServiceAccountToken() != nil && !*baseSpec.AutomountServiceAccountToken() {
+		podSpec.Volumes = append(podSpec.Volumes, util.SATokenProjectionVolume())
+		podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, util.SATokenProjectionVolumeMount())
+	}
+
 	podLabels := util.CombineStringMap(l.Labels(), baseSpec.Labels())
 	podAnnotations := baseSpec.Annotations()
 	d := &appsv1.Deployment{
diff --git a/pkg/util/util.go b/pkg/util/util.go
index 97a37e8a044..c3f7d2de556 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -59,8 +59,66 @@ var (
 const (
 	// LastAppliedConfigAnnotation is annotation key of last applied configuration
 	LastAppliedConfigAnnotation = "pingcap.com/last-applied-configuration"
+
+	// SATokenProjectionVolumeName is the name of the projected service account token volume.
+	SATokenProjectionVolumeName = "kube-api-access"
+	// SATokenProjectionMountPath is the standard Kubernetes service account token mount path.
+	SATokenProjectionMountPath = "/var/run/secrets/kubernetes.io/serviceaccount" // nolint:gosec
 )
 
+// SATokenProjectionVolume returns a projected volume that replicates the three files
+// that rest.InClusterConfig() reads from /var/run/secrets/kubernetes.io/serviceaccount:
+// token, ca.crt, and namespace. Use this when automountServiceAccountToken is false
+// but the container still needs to call the Kubernetes API.
+func SATokenProjectionVolume() corev1.Volume {
+	expirationSeconds := int64(3607)
+	return corev1.Volume{
+		Name: SATokenProjectionVolumeName,
+		VolumeSource: corev1.VolumeSource{
+			Projected: &corev1.ProjectedVolumeSource{
+				Sources: []corev1.VolumeProjection{
+					{
+						ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
+							Path:              "token",
+							ExpirationSeconds: &expirationSeconds,
+						},
+					},
+					{
+						ConfigMap: &corev1.ConfigMapProjection{
+							LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
+							Items: []corev1.KeyToPath{
+								{Key: "ca.crt", Path: "ca.crt"},
+							},
+						},
+					},
+					{
+						DownwardAPI: &corev1.DownwardAPIProjection{
+							Items: []corev1.DownwardAPIVolumeFile{
+								{
+									Path: "namespace",
+									FieldRef: &corev1.ObjectFieldSelector{
+										APIVersion: "v1",
+										FieldPath:  "metadata.namespace",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// SATokenProjectionVolumeMount returns the VolumeMount for SATokenProjectionVolume.
+func SATokenProjectionVolumeMount() corev1.VolumeMount {
+	return corev1.VolumeMount{
+		Name:      SATokenProjectionVolumeName,
+		MountPath: SATokenProjectionMountPath,
+		ReadOnly:  true,
+	}
+}
+
 func GetOrdinalFromPodName(podName string) (int32, error) {
 	ordinalStr := podName[strings.LastIndex(podName, "-")+1:]
 	ordinalInt, err := strconv.ParseInt(ordinalStr, 10, 32)
diff --git a/tests/config.go b/tests/config.go
index aef020e43cd..0c4a017d146 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -41,6 +41,7 @@ type Config struct {
 	TidbVersions string `yaml:"tidb_versions" json:"tidb_versions"`
 	InstallOperator bool `yaml:"install_opeartor" json:"install_opeartor"`
+	InstallCertManager bool `yaml:"install_cert_manager" json:"install_cert_manager"`
 	InstallDMMysql bool `yaml:"install_dm_mysql" json:"install_dm_mysql"`
 	OperatorTag string `yaml:"operator_tag" json:"operator_tag"`
 	OperatorImage string `yaml:"operator_image" json:"operator_image"`
@@ -96,6 +97,7 @@ type Node struct {
 func NewDefaultConfig() *Config {
 	return &Config{
 		AdditionalDrainerVersion: "v3.0.8",
+		InstallCertManager: true,
 		PDMaxReplicas: 5,
 		TiDBTokenLimit: 1024,
diff --git a/tests/e2e/br/br.go b/tests/e2e/br/br.go
index 048527b3c81..33503167de3 100644
--- a/tests/e2e/br/br.go
+++ b/tests/e2e/br/br.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
 	"github.com/pingcap/tidb-operator/pkg/apis/util/config"
+	utilpkg "github.com/pingcap/tidb-operator/pkg/util"
 	e2eframework "github.com/pingcap/tidb-operator/tests/e2e/br/framework"
 	brutil "github.com/pingcap/tidb-operator/tests/e2e/br/framework/br"
 	"github.com/pingcap/tidb-operator/tests/e2e/br/utils/portforward"
@@ -324,6 +325,97 @@ var _ = ginkgo.Describe("Backup and Restore", func() {
 		}
 	})
 
+	ginkgo.It("backup and restore should work with automountServiceAccountToken disabled", func() {
+		backupClusterName := "backup-restore-no-sa-token"
+		restoreClusterName := "restore-no-sa-token"
+		backupVersion := utilimage.TiDBLatest
+		enableTLS := false
+		skipCA := false
+		dbName := "e2etest"
+		backupName := backupClusterName
+		restoreName := restoreClusterName
+		typ := strings.ToLower(typeBR)
+		disableAutomount := false
+
+		ns := f.Namespace.Name
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		ginkgo.By("Create TiDB cluster for backup")
+		err := createTidbClusterWithConfig(f, backupClusterName, backupVersion, enableTLS, skipCA, func(tc *v1alpha1.TidbCluster) {
+			setAllTidbClusterComponentsAutomountServiceAccountToken(tc, &disableAutomount)
+		})
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Create TiDB cluster for restore")
+		err = createTidbClusterWithConfig(f, restoreClusterName, backupVersion, enableTLS, skipCA, func(tc *v1alpha1.TidbCluster) {
+			setAllTidbClusterComponentsAutomountServiceAccountToken(tc, &disableAutomount)
+		})
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Wait for backup TiDB cluster ready")
+		err = utiltidbcluster.WaitForTCConditionReady(f.ExtClient, ns, backupClusterName, tidbReadyTimeout, 0)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Wait for restore TiDB cluster ready")
+		err = utiltidbcluster.WaitForTCConditionReady(f.ExtClient, ns, restoreClusterName, tidbReadyTimeout, 0)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Forward backup TiDB cluster service")
+		backupHost, err := portforward.ForwardOnePort(ctx, f.PortForwarder, ns, getTiDBServiceResourceName(backupClusterName), int(v1alpha1.DefaultTiDBServerPort))
+		framework.ExpectNoError(err)
+		err = initDatabase(backupHost, dbName)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Write data into backup TiDB cluster")
+		backupDSN := getDefaultDSN(backupHost, dbName)
+		err = blockwriter.New().Write(context.Background(), backupDSN)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Create RBAC for backup and restore")
+		err = createRBAC(f)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Create backup with automountServiceAccountToken disabled")
+		backup, err := createBackupAndWaitForComplete(f, backupName, backupClusterName, typ, func(backup *v1alpha1.Backup) {
+			backup.Spec.AutomountServiceAccountToken = &disableAutomount
+		})
+		framework.ExpectNoError(err)
+
+		backupJob, err := f.ClientSet.BatchV1().Jobs(ns).Get(context.TODO(), backup.GetBackupJobName(), metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectNoError(assertProjectedSATokenPodSpec(&backupJob.Spec.Template.Spec))
+
+		backupPod, err := waitForJobPod(f, ns, backup.GetBackupJobName(), 2*time.Minute)
+		framework.ExpectNoError(err)
+		framework.ExpectNoError(assertProjectedSATokenPodSpec(&backupPod.Spec))
+
+		ginkgo.By("Create restore with automountServiceAccountToken disabled")
+		err = createRestoreAndWaitForComplete(f, restoreName, restoreClusterName, typ, backupName, func(restore *v1alpha1.Restore) {
+			restore.Spec.AutomountServiceAccountToken = &disableAutomount
+		})
+		framework.ExpectNoError(err)
+
+		restore, err := f.ExtClient.PingcapV1alpha1().Restores(ns).Get(context.TODO(), restoreName, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		restoreJob, err := f.ClientSet.BatchV1().Jobs(ns).Get(context.TODO(), restore.GetRestoreJobName(), metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectNoError(assertProjectedSATokenPodSpec(&restoreJob.Spec.Template.Spec))
+
+		restorePod, err := waitForJobPod(f, ns, restore.GetRestoreJobName(), 2*time.Minute)
+		framework.ExpectNoError(err)
+		framework.ExpectNoError(assertProjectedSATokenPodSpec(&restorePod.Spec))
+
+		ginkgo.By("Forward restore TiDB cluster service")
+		restoreHost, err := portforward.ForwardOnePort(ctx, f.PortForwarder, ns, getTiDBServiceResourceName(restoreClusterName), int(v1alpha1.DefaultTiDBServerPort))
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Validate restore result")
+		restoreDSN := getDefaultDSN(restoreHost, dbName)
+		err = checkDataIsSame(backupDSN, restoreDSN)
+		framework.ExpectNoError(err)
+	})
+
 	ginkgo.Context("[Backup Clean]", func() {
 		ginkgo.It("clean bakcup files with policy Delete", func() {
 			backupClusterName := "backup-clean"
@@ -1390,6 +1482,10 @@ func getPDServiceResourceName(tcName string) string {
 }
 
 func createTidbCluster(f *e2eframework.Framework, name string, version string, enableTLS bool, skipCA bool) error {
+	return createTidbClusterWithConfig(f, name, version, enableTLS, skipCA, nil)
+}
+
+func createTidbClusterWithConfig(f *e2eframework.Framework, name string, version string, enableTLS bool, skipCA bool, configure func(*v1alpha1.TidbCluster)) error {
 	ns := f.Namespace.Name
 	// TODO: change to use tidbclusterutil like brutil
 	tc := fixture.GetTidbClusterWithoutPDMS(ns, name, version)
@@ -1405,6 +1501,9 @@ func createTidbCluster(f *e2eframework.Framework, name string, version string, e
 			return err
 		}
 	}
+	if configure != nil {
+		configure(tc)
+	}
 
 	if _, err := f.ExtClient.PingcapV1alpha1().TidbClusters(ns).Create(context.TODO(), tc, metav1.CreateOptions{}); err != nil {
 		return err
@@ -1413,6 +1512,48 @@ func createTidbCluster(f *e2eframework.Framework, name string, version string, e
 	return nil
 }
 
+func setAllTidbClusterComponentsAutomountServiceAccountToken(tc *v1alpha1.TidbCluster, value *bool) {
+	if tc.Spec.Discovery.ComponentSpec == nil {
+		tc.Spec.Discovery.ComponentSpec = &v1alpha1.ComponentSpec{}
+	}
+	tc.Spec.Discovery.AutomountServiceAccountToken = value
+
+	if tc.Spec.PD != nil {
+		tc.Spec.PD.AutomountServiceAccountToken = value
+	}
+	for _, pdms := range tc.Spec.PDMS {
+		if pdms != nil {
+			pdms.AutomountServiceAccountToken = value
+		}
+	}
+	if tc.Spec.TiDB != nil {
+		tc.Spec.TiDB.AutomountServiceAccountToken = value
+	}
+	if tc.Spec.TiKV != nil {
+		tc.Spec.TiKV.AutomountServiceAccountToken = value
+	}
+	if tc.Spec.TiFlash != nil {
+		tc.Spec.TiFlash.AutomountServiceAccountToken = value
+	}
+	if tc.Spec.TiCDC != nil {
+		tc.Spec.TiCDC.AutomountServiceAccountToken = value
+	}
+	if tc.Spec.Pump != nil {
+		tc.Spec.Pump.AutomountServiceAccountToken = value
+	}
+	if tc.Spec.TiProxy != nil {
+		tc.Spec.TiProxy.AutomountServiceAccountToken = value
+	}
+	if tc.Spec.TiCI != nil {
+		if tc.Spec.TiCI.Meta != nil {
+			tc.Spec.TiCI.Meta.AutomountServiceAccountToken = value
+		}
+		if tc.Spec.TiCI.Worker != nil {
+			tc.Spec.TiCI.Worker.AutomountServiceAccountToken = value
+		}
+	}
+}
+
 // createLogBackupEnableTidbCluster create tidb cluster and set "log-backup.enable = true" in tikv to enable log backup.
 func createLogBackupEnableTidbCluster(f *e2eframework.Framework, name string, version string, enableTLS bool, skipCA bool) error {
 	ns := f.Namespace.Name
@@ -1801,6 +1942,58 @@ func createRestoreAndWaitForComplete(f *e2eframework.Framework, name, tcName, ty
 	return nil
 }
 
+func waitForJobPod(f *e2eframework.Framework, ns, jobName string, timeout time.Duration) (*v1.Pod, error) {
+	var jobPod *v1.Pod
+	err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
+		pods, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
+			LabelSelector: fmt.Sprintf("job-name=%s", jobName),
+		})
+		if err != nil {
+			return false, err
+		}
+		if len(pods.Items) == 0 {
+			return false, nil
+		}
+		jobPod = pods.Items[0].DeepCopy()
+		return true, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return jobPod, nil
+}
+
+func assertProjectedSATokenPodSpec(podSpec *v1.PodSpec) error {
+	if podSpec.AutomountServiceAccountToken == nil || *podSpec.AutomountServiceAccountToken {
+		return fmt.Errorf("expected automountServiceAccountToken=false, got %v", podSpec.AutomountServiceAccountToken)
+	}
+
+	hasProjectedVolume := false
+	for _, volume := range podSpec.Volumes {
+		if volume.Name != utilpkg.SATokenProjectionVolumeName {
+			continue
+		}
+		if volume.Projected == nil {
+			return fmt.Errorf("volume %s should be projected", utilpkg.SATokenProjectionVolumeName)
+		}
+		hasProjectedVolume = true
+		break
+	}
+	if !hasProjectedVolume {
+		return fmt.Errorf("projected service account token volume %s not found", utilpkg.SATokenProjectionVolumeName)
+	}
+
+	for _, container := range podSpec.Containers {
+		for _, volumeMount := range container.VolumeMounts {
+			if volumeMount.Name == utilpkg.SATokenProjectionVolumeName && volumeMount.MountPath == utilpkg.SATokenProjectionMountPath && volumeMount.ReadOnly {
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("projected service account token volume mount %s not found", utilpkg.SATokenProjectionMountPath)
+}
+
 func getDefaultDSN(host, dbName string) string {
 	user := "root"
 	password := ""
diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go
index 7eac1669ea3..15bec18f6df 100644
--- a/tests/e2e/config/config.go
+++ b/tests/e2e/config/config.go
@@ -38,6 +38,7 @@ func RegisterTiDBOperatorFlags(flags *flag.FlagSet) {
 	flags.IntVar(&TestConfig.FaultTriggerPort, "fault-trigger-port", 23332, "the http port of fault trigger service")
 	flags.StringVar(&TestConfig.E2EImage, "e2e-image", "", "e2e image")
 	flags.BoolVar(&TestConfig.InstallOperator, "install-operator", true, "install a default operator")
+	flags.BoolVar(&TestConfig.InstallCertManager, "install-cert-manager", true, "install cert-manager for e2e test")
 	flags.BoolVar(&TestConfig.InstallDMMysql, "install-dm-mysql", true, "install mysql and tidb for dm test")
 	flags.StringVar(&TestConfig.OperatorTag, "operator-tag", "master", "operator tag used to choose charts")
 	flags.StringVar(&TestConfig.OperatorImage, "operator-image", "pingcap/tidb-operator:latest", "operator image")
diff --git a/tests/e2e/e2e.go b/tests/e2e/e2e.go
index 6bf4e8c28d2..5379c32f9d0 100644
--- a/tests/e2e/e2e.go
+++ b/tests/e2e/e2e.go
@@ -69,6 +69,31 @@ var (
 	operatorKillerStopCh chan struct{}
 )
 
+func buildKubectlCmd(args string) string {
+	cmd := framework.TestContext.KubectlPath
+	if cmd == "" {
+		cmd = "kubectl"
+	}
+	if framework.TestContext.KubeConfig != "" {
+		cmd += fmt.Sprintf(" --kubeconfig=%q", framework.TestContext.KubeConfig)
+	}
+	if framework.TestContext.KubeContext != "" {
+		cmd += fmt.Sprintf(" --context=%q", framework.TestContext.KubeContext)
+	}
+	return fmt.Sprintf("%s %s", cmd, args)
+}
+
+func buildHelmCmd(args string) string {
+	cmd := "helm"
+	if framework.TestContext.KubeConfig != "" {
+		cmd += fmt.Sprintf(" --kubeconfig=%q", framework.TestContext.KubeConfig)
+	}
+	if framework.TestContext.KubeContext != "" {
+		cmd += fmt.Sprintf(" --kube-context=%q", framework.TestContext.KubeContext)
+	}
+	return fmt.Sprintf("%s %s", cmd, args)
+}
+
 // This is modified from framework.SetupSuite().
 // setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
 // There are certain operations we only want to run once per overall test invocation
@@ -164,19 +189,19 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 	}{
 		{
 			text: "Clear all helm releases",
-			cmd:  "helm ls --all --short | xargs -n 1 -r helm uninstall",
+			cmd:  fmt.Sprintf("%s | xargs -n 1 -r %s", buildHelmCmd("ls --all --short"), buildHelmCmd("uninstall")),
 		},
 		{
 			text: "Clear tidb-operator apiservices",
-			cmd:  "kubectl delete apiservices -l app.kubernetes.io/name=tidb-operator",
+			cmd:  buildKubectlCmd("delete apiservices -l app.kubernetes.io/name=tidb-operator"),
 		},
 		{
 			text: "Clear tidb-operator validatingwebhookconfigurations",
-			cmd:  "kubectl delete validatingwebhookconfiguration -l app.kubernetes.io/name=tidb-operator",
+			cmd:  buildKubectlCmd("delete validatingwebhookconfiguration -l app.kubernetes.io/name=tidb-operator"),
 		},
 		{
 			text: "Clear tidb-operator mutatingwebhookconfigurations",
-			cmd:  "kubectl delete mutatingwebhookconfiguration -l app.kubernetes.io/name=tidb-operator",
+			cmd:  buildKubectlCmd("delete mutatingwebhookconfiguration -l app.kubernetes.io/name=tidb-operator"),
 		},
 	}
 	for _, p := range cleaners {
@@ -297,9 +322,13 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 		ginkgo.By("Skip installing tidb-operator")
 	}
 
-	ginkgo.By("Installing cert-manager")
-	err = tidbcluster.InstallCertManager(kubeCli)
-	framework.ExpectNoError(err, "failed to install cert-manager")
+	if e2econfig.TestConfig.InstallCertManager {
+		ginkgo.By("Installing cert-manager")
+		err = tidbcluster.InstallCertManager(kubeCli)
+		framework.ExpectNoError(err, "failed to install cert-manager")
+	} else {
+		ginkgo.By("Skip installing cert-manager")
+	}
 
 	return nil
 }, func(data []byte) { // Run on all Ginkgo nodes
@@ -319,15 +348,20 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {
 	config.Burst = 50
 	cli, _ := versioned.NewForConfig(config)
 	kubeCli, _ := kubernetes.NewForConfig(config)
+	var err error
 	if !ginkgo.CurrentGinkgoTestDescription().Failed {
 		ginkgo.By("Clean labels")
-		err := tests.CleanNodeLabels(kubeCli)
+		err = tests.CleanNodeLabels(kubeCli)
 		framework.ExpectNoError(err, "failed to clean labels")
 	}
 
-	ginkgo.By("Deleting cert-manager")
-	err := tidbcluster.DeleteCertManager(kubeCli)
-	framework.ExpectNoError(err, "failed to delete cert-manager")
+	if e2econfig.TestConfig.InstallCertManager {
+		ginkgo.By("Deleting cert-manager")
+		err = tidbcluster.DeleteCertManager(kubeCli)
+		framework.ExpectNoError(err, "failed to delete cert-manager")
+	} else {
+		ginkgo.By("Skip deleting cert-manager")
+	}
 
 	err = tests.CleanDMMySQL(kubeCli, tests.DMMySQLNamespace)
 	framework.ExpectNoError(err, "failed to clean DM MySQL")
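
For reference, a minimal sketch (not part of the diff above) of how a caller might disable token automounting on a Backup and reuse the helpers added in pkg/util/util.go. The resource name and namespace are made up for illustration, and the BackupSpec field name is assumed from its usage in the hunks above.

package main

import (
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	"github.com/pingcap/tidb-operator/pkg/util"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Setting the field to false disables automounting; the controllers patched
	// above then inject the projected token volume into the generated Job pod.
	automount := false

	// "demo-backup" and "demo" are hypothetical values for illustration only.
	backup := &v1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-backup", Namespace: "demo"},
		Spec: v1alpha1.BackupSpec{
			AutomountServiceAccountToken: &automount,
		},
	}

	// These are the objects the controllers append to the Job pod spec.
	vol := util.SATokenProjectionVolume()
	mount := util.SATokenProjectionVolumeMount()
	fmt.Println(backup.Name, vol.Name, mount.MountPath)
}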