From adac6405bd9601e5d9bbc4fed51f8cca10ac7192 Mon Sep 17 00:00:00 2001 From: rknaur Date: Tue, 12 Nov 2024 09:46:30 +0100 Subject: [PATCH] Add initial Rosa machine pool integration tests --- .../rosacontrolplane_controller.go | 22 +- exp/controllers/rosamachinepool_controller.go | 22 +- .../rosamachinepool_controller_test.go | 477 ++++++++++++++++++ exp/controllers/suite_test.go | 13 + pkg/cloud/scope/rosacontrolplane.go | 5 +- pkg/rosa/client.go | 10 +- pkg/rosa/idps.go | 7 +- pkg/rosa/ocmclient.go | 133 +++++ pkg/rosa/versions.go | 6 +- test/mocks/generate_capa.go | 5 +- test/mocks/ocm_client_mock.go | 380 ++++++++++++++ 11 files changed, 1053 insertions(+), 27 deletions(-) create mode 100644 pkg/rosa/ocmclient.go create mode 100644 test/mocks/ocm_client_mock.go diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index 5a5b07a718..b079f3fb32 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -31,6 +31,7 @@ import ( stsv2 "github.com/aws/aws-sdk-go-v2/service/sts" sts "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/google/go-cmp/cmp" idputils "github.com/openshift-online/ocm-common/pkg/idp/utils" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" @@ -40,6 +41,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/storage/names" @@ -58,6 +60,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" + 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" @@ -89,11 +92,15 @@ type ROSAControlPlaneReconciler struct { WatchFilterValue string WaitInfraPeriod time.Duration Endpoints []scope.ServiceEndpoint + NewStsClient func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI + NewOCMClient func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) } // SetupWithManager is used to setup the controller. func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { log := logger.FromContext(ctx) + r.NewOCMClient = rosa.NewOCMClient + r.NewStsClient = scope.NewSTSClient rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{} c, err := ctrl.NewControllerManagedBy(mgr). @@ -173,6 +180,7 @@ func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Req ControllerName: strings.ToLower(rosaControlPlaneKind), Endpoints: r.Endpoints, Logger: log, + NewStsClient: r.NewStsClient, }) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) @@ -203,8 +211,8 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc } } - ocmClient, err := rosa.NewOCMClient(ctx, rosaScope) - if err != nil { + ocmClient, err := r.NewOCMClient(ctx, rosaScope) + if err != nil || ocmClient == nil { // TODO: need to expose in status, as likely the credentials are invalid return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err) } @@ -332,7 +340,7 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc } ocmClient, err := rosa.NewOCMClient(ctx, rosaScope) - if err != nil { + if err != nil || ocmClient == nil { // TODO: need to expose in status, as likely the credentials are invalid return ctrl.Result{}, 
fmt.Errorf("failed to create OCM client: %w", err) } @@ -406,7 +414,7 @@ func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, ros return len(machinePools) == 0, nil } -func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster) error { +func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster) error { version := rosaScope.ControlPlane.Spec.Version if version == rosa.RawVersionID(cluster.Version()) { conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") @@ -461,7 +469,7 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO return nil } -func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster, creator *rosaaws.Creator) error { +func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster, creator *rosaaws.Creator) error { ocmClusterSpec, updated := r.updateOCMClusterSpec(rosaScope.ControlPlane, cluster) if updated { @@ -758,7 +766,7 @@ func (r *ROSAControlPlaneReconciler) reconcileExternalAuthBootstrapKubeconfig(ct return nil } -func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, ocmClient *ocm.Client, cluster *cmv1.Cluster) error { +func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster) error { rosaScope.Debug("Reconciling ROSA kubeconfig for cluster", "cluster-name", rosaScope.RosaClusterName()) clusterRef := client.ObjectKeyFromObject(rosaScope.Cluster) @@ -870,7 +878,7 @@ func (r 
*ROSAControlPlaneReconciler) reconcileClusterAdminPassword(ctx context.C return password, nil } -func validateControlPlaneSpec(ocmClient *ocm.Client, rosaScope *scope.ROSAControlPlaneScope) (string, error) { +func validateControlPlaneSpec(ocmClient rosa.OCMClient, rosaScope *scope.ROSAControlPlaneScope) (string, error) { version := rosaScope.ControlPlane.Spec.Version valid, err := ocmClient.ValidateHypershiftVersion(version, ocm.DefaultChannelGroup) if err != nil { diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index 41a8f15848..374d3b930c 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/blang/semver" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -16,6 +17,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" @@ -31,6 +33,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" @@ -48,11 +51,15 @@ type ROSAMachinePoolReconciler struct { Recorder record.EventRecorder WatchFilterValue string Endpoints []scope.ServiceEndpoint + NewStsClient func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI + NewOCMClient func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) 
(rosa.OCMClient, error) } // SetupWithManager is used to setup the controller. func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { log := logger.FromContext(ctx) + r.NewOCMClient = rosa.NewOCMClient + r.NewStsClient = scope.NewSTSClient gvk, err := apiutil.GVKForObject(new(expinfrav1.ROSAMachinePool), mgr.GetScheme()) if err != nil { @@ -148,6 +155,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ ControlPlane: controlPlane, ControllerName: "rosaControlPlane", Endpoints: r.Endpoints, + NewStsClient: r.NewStsClient, }) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to create rosaControlPlane scope") @@ -186,8 +194,8 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, } } - ocmClient, err := rosa.NewOCMClient(ctx, rosaControlPlaneScope) - if err != nil { + ocmClient, err := r.NewOCMClient(ctx, rosaControlPlaneScope) + if err != nil || ocmClient == nil { // TODO: need to expose in status, as likely the credentials are invalid return ctrl.Result{}, fmt.Errorf("failed to create OCM client: %w", err) } @@ -197,7 +205,6 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, fmt.Errorf("failed to validate ROSAMachinePool.spec: %w", err) } if failureMessage != nil { - machinePoolScope.RosaMachinePool.Status.FailureMessage = failureMessage // dont' requeue because input is invalid and manual intervention is needed. return ctrl.Result{}, nil } @@ -220,7 +227,6 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, if err != nil { return ctrl.Result{}, err } - if found { if rosaMachinePool.Spec.AvailabilityZone == "" { // reflect the current AvailabilityZone in the spec if not set. 
@@ -298,8 +304,8 @@ func (r *ROSAMachinePoolReconciler) reconcileDelete( ) error { machinePoolScope.Info("Reconciling deletion of RosaMachinePool") - ocmClient, err := rosa.NewOCMClient(ctx, rosaControlPlaneScope) - if err != nil { + ocmClient, err := r.NewOCMClient(ctx, rosaControlPlaneScope) + if err != nil || ocmClient == nil { // TODO: need to expose in status, as likely the credentials are invalid return fmt.Errorf("failed to create OCM client: %w", err) } @@ -320,7 +326,7 @@ func (r *ROSAMachinePoolReconciler) reconcileDelete( return nil } -func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope *scope.RosaMachinePoolScope, ocmClient *ocm.Client, nodePool *cmv1.NodePool) error { +func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope *scope.RosaMachinePoolScope, ocmClient rosa.OCMClient, nodePool *cmv1.NodePool) error { version := machinePoolScope.RosaMachinePool.Spec.Version if version == "" || version == rosa.RawVersionID(nodePool.Version()) { conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") @@ -356,7 +362,7 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope return nil } -func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaMachinePoolScope, ocmClient *ocm.Client, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { +func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaMachinePoolScope, ocmClient rosa.OCMClient, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { machinePool := machinePoolScope.RosaMachinePool.DeepCopy() // default all fields before comparing, so that nil/unset fields don't cause an unnecessary update call. 
machinePool.Default() diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go index 0ff8ae0c83..463d0fbf05 100644 --- a/exp/controllers/rosamachinepool_controller_test.go +++ b/exp/controllers/rosamachinepool_controller_test.go @@ -1,17 +1,38 @@ package controllers import ( + "context" + "fmt" "testing" "time" + "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/golang/mock/gomock" . "github.com/onsi/gomega" + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_stsiface" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" ) func TestNodePoolToRosaMachinePoolSpec(t *testing.T) { @@ -60,3 +81,459 @@ func TestNodePoolToRosaMachinePoolSpec(t *testing.T) { g.Expect(computeSpecDiff(rosaMachinePoolSpec, nodePoolSpec)).To(BeEmpty()) } + +func TestRosaMachinePoolReconcile(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-namespace") + 
g.Expect(err).ToNot(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rosa-secret", + Namespace: ns.Name, + }, + Data: map[string][]byte{ + "ocmToken": []byte("secret-ocm-token-string"), + }, + } + identity := &infrav1.AWSClusterControllerIdentity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: infrav1.AWSClusterControllerIdentitySpec{ + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ + AllowedNamespaces: &infrav1.AllowedNamespaces{}, + }, + }, + } + identity.SetGroupVersionKind(infrav1.GroupVersion.WithKind("AWSClusterStaticIdentity")) + + rosaControlPlane := func(i int) *rosacontrolplanev1.ROSAControlPlane { + return &rosacontrolplanev1.ROSAControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rosa-control-plane-%v", i), + Namespace: ns.Name}, + TypeMeta: metav1.TypeMeta{ + Kind: "ROSAControlPlane", + APIVersion: rosacontrolplanev1.GroupVersion.String(), + }, + Spec: rosacontrolplanev1.RosaControlPlaneSpec{ + RosaClusterName: fmt.Sprintf("rosa-control-plane-%v", i), + Subnets: []string{"subnet-0ac99a6230b408813", "subnet-1ac99a6230b408811"}, + AvailabilityZones: []string{"az-1", "az-2"}, + Network: &rosacontrolplanev1.NetworkSpec{ + MachineCIDR: "10.0.0.0/16", + PodCIDR: "10.128.0.0/14", + ServiceCIDR: "172.30.0.0/16", + }, + Region: "us-east-1", + Version: "4.15.20", + RolesRef: rosacontrolplanev1.AWSRolesRef{}, + OIDCID: "iodcid1", + InstallerRoleARN: "arn1", + WorkerRoleARN: "arn2", + SupportRoleARN: "arn3", + CredentialsSecretRef: &corev1.LocalObjectReference{ + Name: secret.Name, + }, + VersionGate: "Acknowledge", + IdentityRef: &infrav1.AWSIdentityReference{ + Name: identity.Name, + Kind: infrav1.ControllerIdentityKind, + }, + }, + Status: rosacontrolplanev1.RosaControlPlaneStatus{ + Ready: true, + ID: fmt.Sprintf("rosa-control-plane-%v", i), + }, + } + } + + ownerCluster := func(i int) *clusterv1.Cluster { + return &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
fmt.Sprintf("owner-cluster-%v", i), + Namespace: ns.Name, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: rosaControlPlane(i).Name, + Kind: "ROSAControlPlane", + APIVersion: rosacontrolplanev1.GroupVersion.String(), + }, + }, + } + } + + rosaMachinePool := func(i int) *expinfrav1.ROSAMachinePool { + return &expinfrav1.ROSAMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rosa-machinepool-%v", i), + Namespace: ns.Name, + UID: types.UID(fmt.Sprintf("rosa-machinepool-%v", i)), + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ROSAMachinePool", + APIVersion: expinfrav1.GroupVersion.String(), + }, + Spec: expinfrav1.RosaMachinePoolSpec{ + NodePoolName: "test-nodepool", + Version: "4.14.5", + Subnet: "subnet-id", + InstanceType: "m5.large", + }, + } + } + + ownerMachinePool := func(i int) *expclusterv1.MachinePool { + return &expclusterv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("machinepool-%v", i), + Namespace: ns.Name, + Labels: map[string]string{clusterv1.ClusterNameLabel: ownerCluster(i).Name}, + UID: types.UID(fmt.Sprintf("owner-mp-uid--%v", i)), + }, + TypeMeta: metav1.TypeMeta{ + Kind: "MachinePool", + APIVersion: clusterv1.GroupVersion.String(), + }, + Spec: expclusterv1.MachinePoolSpec{ + ClusterName: fmt.Sprintf("owner-cluster-%v", i), + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + ClusterName: fmt.Sprintf("owner-cluster-%v", i), + InfrastructureRef: corev1.ObjectReference{ + UID: rosaMachinePool(i).UID, + Name: rosaMachinePool(i).Name, + Namespace: ns.Namespace, + Kind: "ROSAMachinePool", + APIVersion: expclusterv1.GroupVersion.String(), + }, + }, + }, + }, + } + } + + tests := []struct { + name string + new *expinfrav1.ROSAMachinePool + old *expinfrav1.ROSAMachinePool + expect func(m *mocks.MockOCMClientMockRecorder) + result reconcile.Result + }{ + { + name: "create node pool, nodepool doesn't exist", + old: rosaMachinePool(0), + new: 
&expinfrav1.ROSAMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rosa-machinepool", + Namespace: ns.Name, + UID: "rosa-machinepool", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ROSAMachinePool", + APIVersion: expinfrav1.GroupVersion.String(), + }, + Spec: expinfrav1.RosaMachinePoolSpec{ + NodePoolName: "test-nodepool", + Version: "4.14.5", + Subnet: "subnet-id", + InstanceType: "m5.large", + }, + Status: expinfrav1.RosaMachinePoolStatus{ + Ready: false, + ID: rosaMachinePool(0).Spec.NodePoolName, + }, + }, + result: ctrl.Result{}, + expect: func(m *mocks.MockOCMClientMockRecorder) { + m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) { + return nil, false, nil + }).Times(1) + m.CreateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { + return nodePool, nil + }).Times(1) + }, + }, + { + name: "Nodepool exist, but is not ready", + old: rosaMachinePool(1), + new: &expinfrav1.ROSAMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rosa-machinepool", + Namespace: ns.Name, + UID: "rosa-machinepool", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ROSAMachinePool", + APIVersion: expinfrav1.GroupVersion.String(), + }, + Spec: expinfrav1.RosaMachinePoolSpec{ + NodePoolName: "test-nodepool", + Version: "4.14.5", + Subnet: "subnet-id", + InstanceType: "m5.large", + }, + Status: expinfrav1.RosaMachinePoolStatus{ + Ready: false, + Replicas: 0, + }, + }, + result: ctrl.Result{RequeueAfter: time.Second * 60}, + expect: func(m *mocks.MockOCMClientMockRecorder) { + m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) { + nodePoolBuilder := nodePoolBuilder(rosaMachinePool(1).Spec, ownerMachinePool(1).Spec) + nodePool, err := nodePoolBuilder.ID("node-pool-1").Build() + g.Expect(err).To(BeNil()) + return nodePool, true, nil + }).Times(1) + 
m.UpdateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { + return nodePool, nil + }).Times(1) + m.CreateNodePool(gomock.Any(), gomock.Any()).Times(0) + }, + }, + { + name: "Nodepool is ready", + old: rosaMachinePool(2), + new: &expinfrav1.ROSAMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rosa-machinepool", + Namespace: ns.Name, + UID: "rosa-machinepool", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ROSAMachinePool", + APIVersion: expinfrav1.GroupVersion.String(), + }, + Spec: expinfrav1.RosaMachinePoolSpec{ + NodePoolName: "test-nodepool", + Version: "4.14.5", + Subnet: "subnet-id", + InstanceType: "m5.large", + }, + Status: expinfrav1.RosaMachinePoolStatus{ + Ready: true, + Replicas: 1, + }, + }, + result: ctrl.Result{}, + expect: func(m *mocks.MockOCMClientMockRecorder) { + m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) { + nodePoolBuilder := nodePoolBuilder(rosaMachinePool(2).Spec, ownerMachinePool(2).Spec) + statusBuilder := (&cmv1.NodePoolStatusBuilder{}).CurrentReplicas(1) + autoscalingBuilder := (&cmv1.NodePoolAutoscalingBuilder{}).MinReplica(1).MaxReplica(1) + nodePool, err := nodePoolBuilder.ID("node-pool-1").Autoscaling(autoscalingBuilder).Replicas(1).Status(statusBuilder).Build() + g.Expect(err).NotTo(HaveOccurred()) + + return nodePool, true, nil + }).Times(1) + m.UpdateNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterID string, nodePool *cmv1.NodePool) (*cmv1.NodePool, error) { + statusBuilder := (&cmv1.NodePoolStatusBuilder{}).CurrentReplicas(1) + version := (&cmv1.VersionBuilder{}).RawID("4.14.5") + npBuilder := cmv1.NodePoolBuilder{} + updatedNodePool, err := npBuilder.Copy(nodePool).Status(statusBuilder).Version(version).Build() + g.Expect(err).NotTo(HaveOccurred()) + + return updatedNodePool, nil + }).Times(1) + m.CreateNodePool(gomock.Any(), gomock.Any()).Times(0) + }, + }, + } 
+ + createObject(g, secret, ns.Name) + createObject(g, identity, ns.Name) + defer cleanupObject(g, secret) + defer cleanupObject(g, identity) + + for i, test := range tests { + t.Run(test.name, func(t *testing.T) { + // This is set by CAPI MachinePool reconcile + test.old.OwnerReferences = []metav1.OwnerReference{ + { + Name: ownerMachinePool(i).Name, + UID: ownerMachinePool(i).UID, + Kind: "MachinePool", + APIVersion: clusterv1.GroupVersion.String(), + }, + } + cp := rosaControlPlane(i) + objects := []client.Object{ownerCluster(i), ownerMachinePool(i), cp, test.old} + + for _, obj := range objects { + createObject(g, obj, ns.Name) + } + // make Control Plane ready, can't do this during creation + mpPh, err := patch.NewHelper(cp, testEnv) + cp.Status.Ready = true + g.Expect(mpPh.Patch(ctx, cp)).To(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + // patching is not reliably synchronous + time.Sleep(50 * time.Millisecond) + + mockCtrl := gomock.NewController(t) + recorder := record.NewFakeRecorder(10) + ctx := context.TODO() + ocmMock := mocks.NewMockOCMClient(mockCtrl) + test.expect(ocmMock.EXPECT()) + + stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl) + stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1) + + r := ROSAMachinePoolReconciler{ + Recorder: recorder, + WatchFilterValue: "", + Endpoints: []scope.ServiceEndpoint{}, + Client: testEnv, + NewStsClient: func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock }, + NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) { + return ocmMock, nil + }, + } + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: test.old.Name, Namespace: ns.Name} + + result, errReconcile := r.Reconcile(ctx, req) + g.Expect(errReconcile).ToNot(HaveOccurred()) + g.Expect(result).To(Equal(test.result)) + time.Sleep(50 * time.Millisecond) + + m := &expinfrav1.ROSAMachinePool{} + key := client.ObjectKey{Name: 
test.old.Name, Namespace: ns.Name} + errGet := testEnv.Get(ctx, key, m) + g.Expect(errGet).NotTo(HaveOccurred()) + g.Expect(m.Status.Ready).To(Equal(test.new.Status.Ready)) + g.Expect(m.Status.Replicas).To(Equal(test.new.Status.Replicas)) + g.Expect(m.Status.ID).To(Equal(test.new.Status.ID)) + + // cleanup + for _, obj := range objects { + cleanupObject(g, obj) + } + mockCtrl.Finish() + }) + } + + t.Run("Reconcile delete", func(t *testing.T) { + g := NewWithT(t) + mockCtrl := gomock.NewController(t) + recorder := record.NewFakeRecorder(10) + ctx := context.TODO() + mp := &expinfrav1.ROSAMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rosa-machinepool-9", + Namespace: ns.Name, + UID: "rosa-machinepool-9", + Finalizers: []string{expinfrav1.RosaMachinePoolFinalizer}, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ROSAMachinePool", + APIVersion: expinfrav1.GroupVersion.String(), + }, + Spec: expinfrav1.RosaMachinePoolSpec{ + NodePoolName: "test-nodepool-1", + Version: "4.14.5", + + Subnet: "subnet-id", + InstanceType: "m5.large", + }, + } + oc := ownerCluster(9) + omp := ownerMachinePool(9) + cp := rosaControlPlane(9) + objects := []client.Object{oc, omp, cp, mp} + + for _, obj := range objects { + createObject(g, obj, ns.Name) + } + + cpPh, err := patch.NewHelper(cp, testEnv) + cp.Status.Ready = true + cp.Status.ID = "rosa-control-plane-9" + g.Expect(cpPh.Patch(ctx, cp)).To(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + + ocmMock := mocks.NewMockOCMClient(mockCtrl) + expect := func(m *mocks.MockOCMClientMockRecorder) { + m.GetNodePool(gomock.Any(), gomock.Any()).DoAndReturn(func(clusterId string, nodePoolID string) (*cmv1.NodePool, bool, error) { + nodePoolBuilder := nodePoolBuilder(mp.Spec, omp.Spec) + nodePool, err := nodePoolBuilder.ID("node-pool-1").Build() + g.Expect(err).NotTo(HaveOccurred()) + return nodePool, true, nil + }).Times(1) + m.DeleteNodePool("rosa-control-plane-9", "node-pool-1").DoAndReturn(func(clusterId string, nodePoolID string) error 
{ + return nil + }).Times(1) + } + expect(ocmMock.EXPECT()) + + stsMock := mock_stsiface.NewMockSTSAPI(mockCtrl) + stsMock.EXPECT().GetCallerIdentity(gomock.Any()).Times(1) + + r := ROSAMachinePoolReconciler{ + Recorder: recorder, + WatchFilterValue: "", + Endpoints: []scope.ServiceEndpoint{}, + Client: testEnv, + NewStsClient: func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI { return stsMock }, + NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) { + return ocmMock, nil + }, + } + + log := logger.FromContext(ctx) + machinePoolScope, err1 := scope.NewRosaMachinePoolScope(scope.RosaMachinePoolScopeParams{ + Client: r.Client, + ControllerName: "rosamachinepool", + Cluster: oc, + ControlPlane: cp, + MachinePool: omp, + RosaMachinePool: mp, + Logger: log, + Endpoints: r.Endpoints, + }) + g.Expect(err1).ToNot(HaveOccurred()) + + rosaControlPlaneScope, err2 := scope.NewROSAControlPlaneScope(scope.ROSAControlPlaneScopeParams{ + Client: r.Client, + Cluster: oc, + ControlPlane: cp, + ControllerName: "rosaControlPlane", + Endpoints: r.Endpoints, + NewStsClient: r.NewStsClient, + }) + g.Expect(err2).ToNot(HaveOccurred()) + + err3 := r.reconcileDelete(ctx, machinePoolScope, rosaControlPlaneScope) + g.Expect(err3).ToNot(HaveOccurred()) + + machinePoolScope.Close() + time.Sleep(50 * time.Millisecond) + m := &expinfrav1.ROSAMachinePool{} + key := client.ObjectKey{Name: mp.Name, Namespace: ns.Name} + err4 := testEnv.Get(ctx, key, m) + g.Expect(err4).ToNot(HaveOccurred()) + g.Expect(m.Finalizers).To(BeNil()) + + for _, obj := range objects { + cleanupObject(g, obj) + } + mockCtrl.Finish() + }) +} + +func createObject(g *WithT, obj client.Object, namespace string) { + if obj.DeepCopyObject() != nil { + obj.SetNamespace(namespace) + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + } +} + +func cleanupObject(g *WithT, obj client.Object) { + if obj.DeepCopyObject() != nil { + 
g.Expect(testEnv.Cleanup(ctx, obj)).To(Succeed()) + } +} diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 5f7ded08c8..59ff50bb09 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -21,12 +21,14 @@ import ( "path" "testing" + corev1 "k8s.io/api/core/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" // +kubebuilder:scaffold:imports infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -52,6 +54,8 @@ func setup() { utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(corev1.AddToScheme(scheme.Scheme)) + utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, @@ -76,6 +80,15 @@ func setup() { if err := (&expinfrav1.AWSManagedMachinePool{}).SetupWebhookWithManager(testEnv); err != nil { panic(fmt.Sprintf("Unable to setup AWSManagedMachinePool webhook: %v", err)) } + if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSClusterControllerIdentity webhook: %v", err)) + } + if err := (&expinfrav1.ROSAMachinePool{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup ROSAMachinePool webhook: %v", err)) + } + if err := (&rosacontrolplanev1.ROSAControlPlane{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup ROSAControlPlane webhook: %v", err)) + } go func() { 
fmt.Println("Starting the manager") if err := testEnv.StartManager(ctx); err != nil { diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index 71cc24ed61..166620710b 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -22,9 +22,11 @@ import ( awsclient "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -45,6 +47,7 @@ type ROSAControlPlaneScopeParams struct { ControlPlane *rosacontrolplanev1.ROSAControlPlane ControllerName string Endpoints []ServiceEndpoint + NewStsClient func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSAPI } // NewROSAControlPlaneScope creates a new ROSAControlPlaneScope from the supplied parameters. @@ -83,7 +86,7 @@ func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlP managedScope.session = session managedScope.serviceLimiters = serviceLimiters - stsClient := NewSTSClient(managedScope, managedScope, managedScope, managedScope.ControlPlane) + stsClient := params.NewStsClient(managedScope, managedScope, managedScope, managedScope.ControlPlane) identity, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) if err != nil { return nil, fmt.Errorf("failed to identify the AWS caller: %w", err) diff --git a/pkg/rosa/client.go b/pkg/rosa/client.go index 36c9ae333b..d9b7b37308 100644 --- a/pkg/rosa/client.go +++ b/pkg/rosa/client.go @@ -21,17 +21,21 @@ const ( ) // NewOCMClient creates a new OCM client. 
-func NewOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*ocm.Client, error) { +func NewOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (OCMClient, error) { token, url, err := ocmCredentials(ctx, rosaScope) if err != nil { return nil, err } - return ocm.NewClient().Logger(logrus.New()).Config(&ocmcfg.Config{ + ocmClient, err := ocm.NewClient().Logger(logrus.New()).Config(&ocmcfg.Config{ AccessToken: token, URL: url, }).Build() -} + c := ocmclient{ + ocmClient: ocmClient, + } + return &c, err +} func newOCMRawConnection(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*sdk.Connection, error) { logger, err := sdk.NewGoLoggerBuilder(). Debug(false). diff --git a/pkg/rosa/idps.go b/pkg/rosa/idps.go index bfa9fce65e..0d80bd7d56 100644 --- a/pkg/rosa/idps.go +++ b/pkg/rosa/idps.go @@ -4,7 +4,6 @@ import ( "fmt" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" - "github.com/openshift/rosa/pkg/ocm" ) const ( @@ -14,7 +13,7 @@ const ( // CreateAdminUserIfNotExist creates a new admin user withe username/password in the cluster if username doesn't already exist. // the user is granted admin privileges by being added to a special IDP called `cluster-admin` which will be created if it doesn't already exist. -func CreateAdminUserIfNotExist(client *ocm.Client, clusterID, username, password string) error { +func CreateAdminUserIfNotExist(client OCMClient, clusterID, username, password string) error { existingClusterAdminIDP, userList, err := findExistingClusterAdminIDP(client, clusterID) if err != nil { return fmt.Errorf("failed to find existing cluster admin IDP: %w", err) @@ -75,7 +74,7 @@ func CreateAdminUserIfNotExist(client *ocm.Client, clusterID, username, password } // CreateUserIfNotExist creates a new user with `username` and adds it to the group if it doesn't already exist. 
-func CreateUserIfNotExist(client *ocm.Client, clusterID string, group, username string) (*cmv1.User, error) { +func CreateUserIfNotExist(client OCMClient, clusterID string, group, username string) (*cmv1.User, error) { user, err := client.GetUser(clusterID, group, username) if user != nil || err != nil { return user, err @@ -88,7 +87,7 @@ func CreateUserIfNotExist(client *ocm.Client, clusterID string, group, username return client.CreateUser(clusterID, group, userCfg) } -func findExistingClusterAdminIDP(client *ocm.Client, clusterID string) ( +func findExistingClusterAdminIDP(client OCMClient, clusterID string) ( htpasswdIDP *cmv1.IdentityProvider, userList *cmv1.HTPasswdUserList, reterr error) { idps, err := client.GetIdentityProviders(clusterID) if err != nil { diff --git a/pkg/rosa/ocmclient.go b/pkg/rosa/ocmclient.go new file mode 100644 index 0000000000..ca7fa81fb1 --- /dev/null +++ b/pkg/rosa/ocmclient.go @@ -0,0 +1,133 @@ +// Package rosa provides a way to interact with the Red Hat OpenShift Service on AWS (ROSA) API. +package rosa + +import ( + "context" + + v1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift/rosa/pkg/aws" + "github.com/openshift/rosa/pkg/ocm" + + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" +) + +type ocmclient struct { + ocmClient *ocm.Client +} + +// OCMClient wraps ocm.Client methods that we use in interface, so we are able to mock it. +// We should get rid of this once ocm.Client has its own interface. 
+type OCMClient interface { + AckVersionGate(clusterID string, gateID string) error + AddHTPasswdUser(username string, password string, clusterID string, idpID string) error + CreateNodePool(clusterID string, nodePool *v1.NodePool) (*v1.NodePool, error) + CreateIdentityProvider(clusterID string, idp *v1.IdentityProvider) (*v1.IdentityProvider, error) + CreateCluster(config ocm.Spec) (*v1.Cluster, error) + CreateUser(clusterID string, group string, user *v1.User) (*v1.User, error) + DeleteCluster(clusterKey string, bestEffort bool, creator *aws.Creator) (*v1.Cluster, error) + DeleteNodePool(clusterID string, nodePoolID string) error + DeleteUser(clusterID string, group string, username string) error + GetCluster(clusterKey string, creator *aws.Creator) (*v1.Cluster, error) + GetControlPlaneUpgradePolicies(clusterID string) (controlPlaneUpgradePolicies []*v1.ControlPlaneUpgradePolicy, err error) + GetHTPasswdUserList(clusterID string, htpasswdIDPId string) (*v1.HTPasswdUserList, error) + GetIdentityProviders(clusterID string) ([]*v1.IdentityProvider, error) + GetMissingGateAgreementsHypershift(clusterID string, upgradePolicy *v1.ControlPlaneUpgradePolicy) ([]*v1.VersionGate, error) + GetNodePool(clusterID string, nodePoolID string) (*v1.NodePool, bool, error) + GetHypershiftNodePoolUpgrade(clusterID string, clusterKey string, nodePoolID string) (*v1.NodePool, *v1.NodePoolUpgradePolicy, error) + GetUser(clusterID string, group string, username string) (*v1.User, error) + ScheduleHypershiftControlPlaneUpgrade(clusterID string, upgradePolicy *v1.ControlPlaneUpgradePolicy) (*v1.ControlPlaneUpgradePolicy, error) + ScheduleNodePoolUpgrade(clusterID string, nodePoolID string, upgradePolicy *v1.NodePoolUpgradePolicy) (*v1.NodePoolUpgradePolicy, error) + UpdateNodePool(clusterID string, nodePool *v1.NodePool) (*v1.NodePool, error) + UpdateCluster(clusterKey string, creator *aws.Creator, config ocm.Spec) error + ValidateHypershiftVersion(versionRawID string, channelGroup 
string) (bool, error) +} + +func (c *ocmclient) AckVersionGate(clusterID string, gateID string) error { + return c.ocmClient.AckVersionGate(clusterID, gateID) +} + +func (c *ocmclient) AddHTPasswdUser(username string, password string, clusterID string, idpID string) error { + return c.ocmClient.AddHTPasswdUser(username, password, clusterID, idpID) +} +func (c *ocmclient) CreateIdentityProvider(clusterID string, idp *v1.IdentityProvider) (*v1.IdentityProvider, error) { + return c.ocmClient.CreateIdentityProvider(clusterID, idp) +} +func (c *ocmclient) CreateNodePool(clusterID string, nodePool *v1.NodePool) (*v1.NodePool, error) { + return c.ocmClient.CreateNodePool(clusterID, nodePool) +} + +func (c *ocmclient) CreateCluster(config ocm.Spec) (*v1.Cluster, error) { + return c.ocmClient.CreateCluster(config) +} +func (c *ocmclient) CreateUser(clusterID string, group string, user *v1.User) (*v1.User, error) { + return c.ocmClient.CreateUser(clusterID, group, user) +} + +func (c *ocmclient) DeleteUser(clusterID string, group string, username string) error { + return c.ocmClient.DeleteUser(clusterID, group, username) +} + +func (c *ocmclient) DeleteNodePool(clusterID string, nodePoolID string) error { + return c.ocmClient.DeleteNodePool(clusterID, nodePoolID) +} + +func (c *ocmclient) DeleteCluster(clusterKey string, bestEffort bool, creator *aws.Creator) (*v1.Cluster, error) { + return c.ocmClient.DeleteCluster(clusterKey, bestEffort, creator) +} + +func (c *ocmclient) GetIdentityProviders(clusterID string) ([]*v1.IdentityProvider, error) { + return c.ocmClient.GetIdentityProviders(clusterID) +} + +func (c *ocmclient) GetControlPlaneUpgradePolicies(clusterID string) (controlPlaneUpgradePolicies []*v1.ControlPlaneUpgradePolicy, err error) { + return c.ocmClient.GetControlPlaneUpgradePolicies(clusterID) +} + +func (c *ocmclient) GetHTPasswdUserList(clusterID string, htpasswdIDPId string) (*v1.HTPasswdUserList, error) { + return c.ocmClient.GetHTPasswdUserList(clusterID, 
htpasswdIDPId) +} + +func (c *ocmclient) GetMissingGateAgreementsHypershift(clusterID string, upgradePolicy *v1.ControlPlaneUpgradePolicy) ([]*v1.VersionGate, error) { + return c.ocmClient.GetMissingGateAgreementsHypershift(clusterID, upgradePolicy) +} + +func (c *ocmclient) GetNodePool(clusterID string, nodePoolID string) (*v1.NodePool, bool, error) { + return c.ocmClient.GetNodePool(clusterID, nodePoolID) +} + +func (c *ocmclient) GetHypershiftNodePoolUpgrade(clusterID string, clusterKey string, nodePoolID string) (*v1.NodePool, *v1.NodePoolUpgradePolicy, error) { + return c.ocmClient.GetHypershiftNodePoolUpgrade(clusterID, clusterKey, nodePoolID) +} + +func (c *ocmclient) GetCluster(clusterKey string, creator *aws.Creator) (*v1.Cluster, error) { + return c.ocmClient.GetCluster(clusterKey, creator) +} + +func (c *ocmclient) GetUser(clusterID string, group string, username string) (*v1.User, error) { + return c.ocmClient.GetUser(clusterID, group, username) +} + +func (c *ocmclient) ScheduleNodePoolUpgrade(clusterID string, nodePoolID string, upgradePolicy *v1.NodePoolUpgradePolicy) (*v1.NodePoolUpgradePolicy, error) { + return c.ocmClient.ScheduleNodePoolUpgrade(clusterID, nodePoolID, upgradePolicy) +} + +func (c *ocmclient) ScheduleHypershiftControlPlaneUpgrade(clusterID string, upgradePolicy *v1.ControlPlaneUpgradePolicy) (*v1.ControlPlaneUpgradePolicy, error) { + return c.ocmClient.ScheduleHypershiftControlPlaneUpgrade(clusterID, upgradePolicy) +} + +func (c *ocmclient) UpdateCluster(clusterKey string, creator *aws.Creator, config ocm.Spec) error { + return c.ocmClient.UpdateCluster(clusterKey, creator, config) +} + +func (c *ocmclient) UpdateNodePool(clusterID string, nodePool *v1.NodePool) (*v1.NodePool, error) { + return c.ocmClient.UpdateNodePool(clusterID, nodePool) +} + +func (c *ocmclient) ValidateHypershiftVersion(versionRawID string, channelGroup string) (bool, error) { + return c.ocmClient.ValidateHypershiftVersion(versionRawID, channelGroup) +} + +// 
NewMockOCMClient creates a new empty ocm.Client without any real connection. +func NewMockOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (OCMClient, error) { + return &ocmclient{ocmClient: &ocm.Client{}}, nil +} diff --git a/pkg/rosa/versions.go b/pkg/rosa/versions.go index 27136c8772..18706567d0 100644 --- a/pkg/rosa/versions.go +++ b/pkg/rosa/versions.go @@ -13,7 +13,7 @@ import ( var MinSupportedVersion = semver.MustParse("4.14.0") // CheckExistingScheduledUpgrade checks and returns the current upgrade schedule if any. -func CheckExistingScheduledUpgrade(client *ocm.Client, cluster *cmv1.Cluster) (*cmv1.ControlPlaneUpgradePolicy, error) { +func CheckExistingScheduledUpgrade(client OCMClient, cluster *cmv1.Cluster) (*cmv1.ControlPlaneUpgradePolicy, error) { upgradePolicies, err := client.GetControlPlaneUpgradePolicies(cluster.ID()) if err != nil { return nil, err @@ -27,7 +27,7 @@ func CheckExistingScheduledUpgrade(client *ocm.Client, cluster *cmv1.Cluster) (* } // ScheduleControlPlaneUpgrade schedules a new control plane upgrade to the specified version at the specified time. -func ScheduleControlPlaneUpgrade(client *ocm.Client, cluster *cmv1.Cluster, version string, nextRun time.Time, ack bool) (*cmv1.ControlPlaneUpgradePolicy, error) { +func ScheduleControlPlaneUpgrade(client OCMClient, cluster *cmv1.Cluster, version string, nextRun time.Time, ack bool) (*cmv1.ControlPlaneUpgradePolicy, error) { // earliestNextRun is set to at least 5 min from now by the OCM API. // Set our next run request to something slightly longer than 5min to make sure we account for the latency between when we send this // request and when the server processes it. @@ -71,7 +71,7 @@ func ScheduleControlPlaneUpgrade(client *ocm.Client, cluster *cmv1.Cluster, vers } // ScheduleNodePoolUpgrade schedules a new nodePool upgrade to the specified version at the specified time. 
-func ScheduleNodePoolUpgrade(client *ocm.Client, clusterID string, nodePool *cmv1.NodePool, version string, nextRun time.Time) (*cmv1.NodePoolUpgradePolicy, error) { +func ScheduleNodePoolUpgrade(client OCMClient, clusterID string, nodePool *cmv1.NodePool, version string, nextRun time.Time) (*cmv1.NodePoolUpgradePolicy, error) { // earliestNextRun is set to at least 5 min from now by the OCM API. // Set our next run request to something slightly longer than 5min to make sure we account for the latency between when we send this // request and when the server processes it. diff --git a/test/mocks/generate_capa.go b/test/mocks/generate_capa.go index a1a46e7c4b..ca3012d486 100644 --- a/test/mocks/generate_capa.go +++ b/test/mocks/generate_capa.go @@ -14,7 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mocks provides a way to generate mock objects for OCM and ClusterScoper services. +// //go:generate ../../hack/tools/bin/mockgen -destination capa_clusterscoper_mock.go -package mocks sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud ClusterScoper //go:generate /usr/bin/env bash -c "cat ../../hack/boilerplate/boilerplate.generatego.txt capa_clusterscoper_mock.go > _capa_clusterscoper_mock.go && mv _capa_clusterscoper_mock.go capa_clusterscoper_mock.go" - +//go:generate ../../hack/tools/bin/mockgen -destination ocm_client_mock.go -package mocks sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa OCMClient +//go:generate /usr/bin/env bash -c "cat ../../hack/boilerplate/boilerplate.generatego.txt ocm_client_mock.go > _ocm_client_mock.go && mv _ocm_client_mock.go ocm_client_mock.go" package mocks diff --git a/test/mocks/ocm_client_mock.go b/test/mocks/ocm_client_mock.go new file mode 100644 index 0000000000..4e948cf639 --- /dev/null +++ b/test/mocks/ocm_client_mock.go @@ -0,0 +1,380 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa (interfaces: OCMClient) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + v1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + aws "github.com/openshift/rosa/pkg/aws" + ocm "github.com/openshift/rosa/pkg/ocm" +) + +// MockOCMClient is a mock of OCMClient interface. +type MockOCMClient struct { + ctrl *gomock.Controller + recorder *MockOCMClientMockRecorder +} + +// MockOCMClientMockRecorder is the mock recorder for MockOCMClient. +type MockOCMClientMockRecorder struct { + mock *MockOCMClient +} + +// NewMockOCMClient creates a new mock instance. +func NewMockOCMClient(ctrl *gomock.Controller) *MockOCMClient { + mock := &MockOCMClient{ctrl: ctrl} + mock.recorder = &MockOCMClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockOCMClient) EXPECT() *MockOCMClientMockRecorder { + return m.recorder +} + +// AckVersionGate mocks base method. +func (m *MockOCMClient) AckVersionGate(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AckVersionGate", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// AckVersionGate indicates an expected call of AckVersionGate. 
+func (mr *MockOCMClientMockRecorder) AckVersionGate(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AckVersionGate", reflect.TypeOf((*MockOCMClient)(nil).AckVersionGate), arg0, arg1) +} + +// AddHTPasswdUser mocks base method. +func (m *MockOCMClient) AddHTPasswdUser(arg0, arg1, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddHTPasswdUser", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddHTPasswdUser indicates an expected call of AddHTPasswdUser. +func (mr *MockOCMClientMockRecorder) AddHTPasswdUser(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHTPasswdUser", reflect.TypeOf((*MockOCMClient)(nil).AddHTPasswdUser), arg0, arg1, arg2, arg3) +} + +// CreateCluster mocks base method. +func (m *MockOCMClient) CreateCluster(arg0 ocm.Spec) (*v1.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateCluster", arg0) + ret0, _ := ret[0].(*v1.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCluster indicates an expected call of CreateCluster. +func (mr *MockOCMClientMockRecorder) CreateCluster(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCluster", reflect.TypeOf((*MockOCMClient)(nil).CreateCluster), arg0) +} + +// CreateIdentityProvider mocks base method. +func (m *MockOCMClient) CreateIdentityProvider(arg0 string, arg1 *v1.IdentityProvider) (*v1.IdentityProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateIdentityProvider", arg0, arg1) + ret0, _ := ret[0].(*v1.IdentityProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateIdentityProvider indicates an expected call of CreateIdentityProvider. 
+func (mr *MockOCMClientMockRecorder) CreateIdentityProvider(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIdentityProvider", reflect.TypeOf((*MockOCMClient)(nil).CreateIdentityProvider), arg0, arg1) +} + +// CreateNodePool mocks base method. +func (m *MockOCMClient) CreateNodePool(arg0 string, arg1 *v1.NodePool) (*v1.NodePool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNodePool", arg0, arg1) + ret0, _ := ret[0].(*v1.NodePool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNodePool indicates an expected call of CreateNodePool. +func (mr *MockOCMClientMockRecorder) CreateNodePool(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodePool", reflect.TypeOf((*MockOCMClient)(nil).CreateNodePool), arg0, arg1) +} + +// CreateUser mocks base method. +func (m *MockOCMClient) CreateUser(arg0, arg1 string, arg2 *v1.User) (*v1.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateUser", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateUser indicates an expected call of CreateUser. +func (mr *MockOCMClientMockRecorder) CreateUser(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockOCMClient)(nil).CreateUser), arg0, arg1, arg2) +} + +// DeleteCluster mocks base method. +func (m *MockOCMClient) DeleteCluster(arg0 string, arg1 bool, arg2 *aws.Creator) (*v1.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCluster", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteCluster indicates an expected call of DeleteCluster. 
+func (mr *MockOCMClientMockRecorder) DeleteCluster(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockOCMClient)(nil).DeleteCluster), arg0, arg1, arg2) +} + +// DeleteNodePool mocks base method. +func (m *MockOCMClient) DeleteNodePool(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNodePool", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNodePool indicates an expected call of DeleteNodePool. +func (mr *MockOCMClientMockRecorder) DeleteNodePool(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodePool", reflect.TypeOf((*MockOCMClient)(nil).DeleteNodePool), arg0, arg1) +} + +// DeleteUser mocks base method. +func (m *MockOCMClient) DeleteUser(arg0, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteUser", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteUser indicates an expected call of DeleteUser. +func (mr *MockOCMClientMockRecorder) DeleteUser(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockOCMClient)(nil).DeleteUser), arg0, arg1, arg2) +} + +// GetCluster mocks base method. +func (m *MockOCMClient) GetCluster(arg0 string, arg1 *aws.Creator) (*v1.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCluster", arg0, arg1) + ret0, _ := ret[0].(*v1.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCluster indicates an expected call of GetCluster. 
+func (mr *MockOCMClientMockRecorder) GetCluster(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCluster", reflect.TypeOf((*MockOCMClient)(nil).GetCluster), arg0, arg1) +} + +// GetControlPlaneUpgradePolicies mocks base method. +func (m *MockOCMClient) GetControlPlaneUpgradePolicies(arg0 string) ([]*v1.ControlPlaneUpgradePolicy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetControlPlaneUpgradePolicies", arg0) + ret0, _ := ret[0].([]*v1.ControlPlaneUpgradePolicy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetControlPlaneUpgradePolicies indicates an expected call of GetControlPlaneUpgradePolicies. +func (mr *MockOCMClientMockRecorder) GetControlPlaneUpgradePolicies(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetControlPlaneUpgradePolicies", reflect.TypeOf((*MockOCMClient)(nil).GetControlPlaneUpgradePolicies), arg0) +} + +// GetHTPasswdUserList mocks base method. +func (m *MockOCMClient) GetHTPasswdUserList(arg0, arg1 string) (*v1.HTPasswdUserList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHTPasswdUserList", arg0, arg1) + ret0, _ := ret[0].(*v1.HTPasswdUserList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHTPasswdUserList indicates an expected call of GetHTPasswdUserList. +func (mr *MockOCMClientMockRecorder) GetHTPasswdUserList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHTPasswdUserList", reflect.TypeOf((*MockOCMClient)(nil).GetHTPasswdUserList), arg0, arg1) +} + +// GetHypershiftNodePoolUpgrade mocks base method. 
+func (m *MockOCMClient) GetHypershiftNodePoolUpgrade(arg0, arg1, arg2 string) (*v1.NodePool, *v1.NodePoolUpgradePolicy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHypershiftNodePoolUpgrade", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.NodePool) + ret1, _ := ret[1].(*v1.NodePoolUpgradePolicy) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetHypershiftNodePoolUpgrade indicates an expected call of GetHypershiftNodePoolUpgrade. +func (mr *MockOCMClientMockRecorder) GetHypershiftNodePoolUpgrade(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHypershiftNodePoolUpgrade", reflect.TypeOf((*MockOCMClient)(nil).GetHypershiftNodePoolUpgrade), arg0, arg1, arg2) +} + +// GetIdentityProviders mocks base method. +func (m *MockOCMClient) GetIdentityProviders(arg0 string) ([]*v1.IdentityProvider, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetIdentityProviders", arg0) + ret0, _ := ret[0].([]*v1.IdentityProvider) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetIdentityProviders indicates an expected call of GetIdentityProviders. +func (mr *MockOCMClientMockRecorder) GetIdentityProviders(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIdentityProviders", reflect.TypeOf((*MockOCMClient)(nil).GetIdentityProviders), arg0) +} + +// GetMissingGateAgreementsHypershift mocks base method. +func (m *MockOCMClient) GetMissingGateAgreementsHypershift(arg0 string, arg1 *v1.ControlPlaneUpgradePolicy) ([]*v1.VersionGate, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMissingGateAgreementsHypershift", arg0, arg1) + ret0, _ := ret[0].([]*v1.VersionGate) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMissingGateAgreementsHypershift indicates an expected call of GetMissingGateAgreementsHypershift. 
+func (mr *MockOCMClientMockRecorder) GetMissingGateAgreementsHypershift(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMissingGateAgreementsHypershift", reflect.TypeOf((*MockOCMClient)(nil).GetMissingGateAgreementsHypershift), arg0, arg1) +} + +// GetNodePool mocks base method. +func (m *MockOCMClient) GetNodePool(arg0, arg1 string) (*v1.NodePool, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodePool", arg0, arg1) + ret0, _ := ret[0].(*v1.NodePool) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetNodePool indicates an expected call of GetNodePool. +func (mr *MockOCMClientMockRecorder) GetNodePool(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodePool", reflect.TypeOf((*MockOCMClient)(nil).GetNodePool), arg0, arg1) +} + +// GetUser mocks base method. +func (m *MockOCMClient) GetUser(arg0, arg1, arg2 string) (*v1.User, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUser", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.User) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUser indicates an expected call of GetUser. +func (mr *MockOCMClientMockRecorder) GetUser(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUser", reflect.TypeOf((*MockOCMClient)(nil).GetUser), arg0, arg1, arg2) +} + +// ScheduleHypershiftControlPlaneUpgrade mocks base method. 
+func (m *MockOCMClient) ScheduleHypershiftControlPlaneUpgrade(arg0 string, arg1 *v1.ControlPlaneUpgradePolicy) (*v1.ControlPlaneUpgradePolicy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScheduleHypershiftControlPlaneUpgrade", arg0, arg1) + ret0, _ := ret[0].(*v1.ControlPlaneUpgradePolicy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ScheduleHypershiftControlPlaneUpgrade indicates an expected call of ScheduleHypershiftControlPlaneUpgrade. +func (mr *MockOCMClientMockRecorder) ScheduleHypershiftControlPlaneUpgrade(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleHypershiftControlPlaneUpgrade", reflect.TypeOf((*MockOCMClient)(nil).ScheduleHypershiftControlPlaneUpgrade), arg0, arg1) +} + +// ScheduleNodePoolUpgrade mocks base method. +func (m *MockOCMClient) ScheduleNodePoolUpgrade(arg0, arg1 string, arg2 *v1.NodePoolUpgradePolicy) (*v1.NodePoolUpgradePolicy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScheduleNodePoolUpgrade", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.NodePoolUpgradePolicy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ScheduleNodePoolUpgrade indicates an expected call of ScheduleNodePoolUpgrade. +func (mr *MockOCMClientMockRecorder) ScheduleNodePoolUpgrade(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleNodePoolUpgrade", reflect.TypeOf((*MockOCMClient)(nil).ScheduleNodePoolUpgrade), arg0, arg1, arg2) +} + +// UpdateCluster mocks base method. +func (m *MockOCMClient) UpdateCluster(arg0 string, arg1 *aws.Creator, arg2 ocm.Spec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCluster", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateCluster indicates an expected call of UpdateCluster. 
+func (mr *MockOCMClientMockRecorder) UpdateCluster(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCluster", reflect.TypeOf((*MockOCMClient)(nil).UpdateCluster), arg0, arg1, arg2) +} + +// UpdateNodePool mocks base method. +func (m *MockOCMClient) UpdateNodePool(arg0 string, arg1 *v1.NodePool) (*v1.NodePool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNodePool", arg0, arg1) + ret0, _ := ret[0].(*v1.NodePool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNodePool indicates an expected call of UpdateNodePool. +func (mr *MockOCMClientMockRecorder) UpdateNodePool(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodePool", reflect.TypeOf((*MockOCMClient)(nil).UpdateNodePool), arg0, arg1) +} + +// ValidateHypershiftVersion mocks base method. +func (m *MockOCMClient) ValidateHypershiftVersion(arg0, arg1 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateHypershiftVersion", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateHypershiftVersion indicates an expected call of ValidateHypershiftVersion. +func (mr *MockOCMClientMockRecorder) ValidateHypershiftVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateHypershiftVersion", reflect.TypeOf((*MockOCMClient)(nil).ValidateHypershiftVersion), arg0, arg1) +}