OCPBUGS-44655,OCPBUGS-43083: Fix IPv6 Disconnected HCP deployments #5168

Merged: 2 commits, Dec 18, 2024
Changes from 1 commit
7 changes: 3 additions & 4 deletions cmd/cluster/core/create.go
@@ -648,7 +648,7 @@ func (opts *RawCreateOptions) Validate(ctx context.Context) (*ValidatedCreateOpt
if err != nil {
return nil, fmt.Errorf("could not retrieve kube clientset: %w", err)
}
if err := validateMgmtClusterAndNodePoolCPUArchitectures(ctx, opts, kc); err != nil {
if err := validateMgmtClusterAndNodePoolCPUArchitectures(ctx, opts, kc, &hyperutil.RegistryClientImageMetadataProvider{}); err != nil {
return nil, err
}
}
@@ -1012,7 +1012,7 @@ func parseTolerationString(str string) (*corev1.Toleration, error) {
// validateMgmtClusterAndNodePoolCPUArchitectures checks if a multi-arch release image or release stream was provided.
// If none were provided, checks to make sure the NodePool CPU arch and the management cluster CPU arch match; if they
// do not, the CLI will return an error since the NodePool will fail to complete during runtime.
func validateMgmtClusterAndNodePoolCPUArchitectures(ctx context.Context, opts *RawCreateOptions, kc kubeclient.Interface) error {
func validateMgmtClusterAndNodePoolCPUArchitectures(ctx context.Context, opts *RawCreateOptions, kc kubeclient.Interface, imageMetadataProvider hyperutil.ImageMetadataProvider) error {
validMultiArchImage := false

// Check if the release image is multi-arch
@@ -1021,8 +1021,7 @@ func validateMgmtClusterAndNodePoolCPUArchitectures(ctx context.Context, opts *R
if err != nil {
return fmt.Errorf("failed to read pull secret file: %w", err)
}

validMultiArchImage, err = registryclient.IsMultiArchManifestList(ctx, opts.ReleaseImage, pullSecret)
validMultiArchImage, err = registryclient.IsMultiArchManifestList(ctx, opts.ReleaseImage, pullSecret, imageMetadataProvider)
if err != nil {
return err
}
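
The change above swaps the direct registryclient call for an injected image-metadata provider, which is what lets the unit tests below stub it out. Going only by the call sites in this PR, the provider exposes roughly the surface sketched here; the real interface is hyperutil.ImageMetadataProvider in support/util and its exact signatures may differ:

```go
// Sketch only: the provider surface implied by this diff's call sites
// (GetMetadata in olm.GetCatalogImages, GetDigest in the CVO reconcilers).
// Types here are placeholders, not HyperShift's real types.
package sketch

import "context"

// resolvedRef holds the only fields the new code reads from the resolved
// reference; in HyperShift the real type also carries the digest in NameString().
type resolvedRef struct {
	Registry  string
	Namespace string
	name      string // e.g. "ocp-release@sha256:..."
}

func (r resolvedRef) NameString() string { return r.name }

type imageMetadataProvider interface {
	// GetMetadata fetches the image config; the two middle returns are
	// discarded by the code in this PR.
	GetMetadata(ctx context.Context, imageRef string, pullSecret []byte) (any, any, any, error)
	// GetDigest resolves imageRef to a digest plus a digest-pinned reference.
	GetDigest(ctx context.Context, imageRef string, pullSecret []byte) (string, resolvedRef, error)
}
```
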
10 changes: 8 additions & 2 deletions cmd/cluster/core/create_test.go
@@ -6,6 +6,8 @@ import (

. "github.com/onsi/gomega"
hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client"
"github.com/openshift/hypershift/support/util/fakeimagemetadataprovider"
crclient "sigs.k8s.io/controller-runtime/pkg/client"

corev1 "k8s.io/api/core/v1"
@@ -19,11 +21,15 @@ func TestValidateMgmtClusterAndNodePoolCPUArchitectures(t *testing.T) {

fakeKubeClient := fakekubeclient.NewSimpleClientset()
fakeDiscovery, ok := fakeKubeClient.Discovery().(*fakediscovery.FakeDiscovery)

if !ok {
t.Fatalf("failed to convert FakeDiscovery")
}

fakeMetadataProvider := &fakeimagemetadataprovider.FakeRegistryClientImageMetadataProvider{
Result: &dockerv1client.DockerImageConfig{},
Manifest: fakeimagemetadataprovider.FakeManifest{},
}

// if you want to fake a specific version
fakeDiscovery.FakedServerVersion = &apiversion.Info{
Platform: "linux/amd64",
@@ -80,7 +86,7 @@ for _, tc := range tests {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
err := validateMgmtClusterAndNodePoolCPUArchitectures(ctx, tc.opts, fakeKubeClient)
err := validateMgmtClusterAndNodePoolCPUArchitectures(ctx, tc.opts, fakeKubeClient, fakeMetadataProvider)
if tc.expectError {
g.Expect(err).To(HaveOccurred())
} else {
@@ -115,7 +115,7 @@ func cvoLabels() map[string]string {

var port int32 = 8443

func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, deploymentConfig config.DeploymentConfig, controlPlaneImage, releaseImage, cliImage, availabilityProberImage, clusterID string, updateService configv1.URL, platformType hyperv1.PlatformType, oauthEnabled, enableCVOManagementClusterMetricsAccess bool, featureSet configv1.FeatureSet) error {
func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, deploymentConfig config.DeploymentConfig, controlPlaneReleaseImage, dataPlaneReleaseImage, cliImage, availabilityProberImage, clusterID string, updateService configv1.URL, platformType hyperv1.PlatformType, oauthEnabled, enableCVOManagementClusterMetricsAccess bool, featureSet configv1.FeatureSet) error {
ownerRef.ApplyTo(deployment)

// preserve existing resource requirements for main CVO container
@@ -138,11 +138,11 @@ func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef
Spec: corev1.PodSpec{
AutomountServiceAccountToken: ptr.To(false),
InitContainers: []corev1.Container{
util.BuildContainer(cvoContainerPrepPayload(), buildCVOContainerPrepPayload(releaseImage, platformType, oauthEnabled, featureSet)),
util.BuildContainer(cvoContainerPrepPayload(), buildCVOContainerPrepPayload(dataPlaneReleaseImage, platformType, oauthEnabled, featureSet)),
util.BuildContainer(cvoContainerBootstrap(), buildCVOContainerBootstrap(cliImage, clusterID)),
},
Containers: []corev1.Container{
util.BuildContainer(cvoContainerMain(), buildCVOContainerMain(controlPlaneImage, releaseImage, deployment.Namespace, updateService, enableCVOManagementClusterMetricsAccess)),
util.BuildContainer(cvoContainerMain(), buildCVOContainerMain(controlPlaneReleaseImage, dataPlaneReleaseImage, deployment.Namespace, updateService, enableCVOManagementClusterMetricsAccess)),
},
Volumes: []corev1.Volume{
util.BuildVolume(cvoVolumePayload(), buildCVOVolumePayload),
@@ -331,6 +331,7 @@ func preparePayloadScript(platformType hyperv1.PlatformType, oauthEnabled bool,
" release.openshift.io/delete: \"true\"",
)
}
stmts = append(stmts, "EOF")
return strings.Join(stmts, "\n")
}

@@ -361,17 +362,17 @@ oc get clusterversion/version &> /dev/null || oc create -f /tmp/clusterversion.y
return fmt.Sprintf(scriptTemplate, clusterID, payloadDir)
}

func buildCVOContainerMain(image, releaseImage, namespace string, updateService configv1.URL, enableCVOManagementClusterMetricsAccess bool) func(c *corev1.Container) {
func buildCVOContainerMain(controlPlaneReleaseImage, dataPlaneReleaseImage, namespace string, updateService configv1.URL, enableCVOManagementClusterMetricsAccess bool) func(c *corev1.Container) {
cpath := func(vol, file string) string {
return path.Join(volumeMounts.Path(cvoContainerMain().Name, vol), file)
}
return func(c *corev1.Container) {
c.Image = image
c.Image = controlPlaneReleaseImage
c.Command = []string{"cluster-version-operator"}
c.Args = []string{
"start",
"--release-image",
releaseImage,
dataPlaneReleaseImage,
"--enable-auto-update=false",
"--kubeconfig",
path.Join(volumeMounts.Path(c.Name, cvoVolumeKubeconfig().Name), kas.KubeconfigKey),
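
The `stmts = append(stmts, "EOF")` lines added to both copies of preparePayloadScript terminate the here-document the generated script opens when it writes its manifest, which is also why the golden YAML fixture further down now ends with a matching `EOF` line. A minimal sketch of the failure mode, with made-up paths, manifest content, and follow-up command:

```go
// Illustrative only: why the generated bash script needs the closing EOF.
// The real statements come from preparePayloadScript; these are invented.
package main

import (
	"fmt"
	"strings"
)

func main() {
	stmts := []string{
		"cat > /tmp/overrides.yaml <<EOF", // opens the here-document
		"apiVersion: v1",
		"kind: ConfigMap",
		// Without the next terminator, bash keeps reading the rest of the
		// script as manifest content instead of executing it.
		"EOF",
		"oc apply -f /tmp/overrides.yaml",
	}
	fmt.Println(strings.Join(stmts, "\n"))
}
```
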
@@ -175,6 +175,7 @@ type HostedControlPlaneReconciler struct {
awsSession *session.Session
reconcileInfrastructureStatus func(ctx context.Context, hcp *hyperv1.HostedControlPlane) (infra.InfrastructureStatus, error)
EnableCVOManagementClusterMetricsAccess bool
ImageMetadataProvider util.ImageMetadataProvider

IsCPOV2 bool
}
@@ -3639,9 +3640,47 @@ func (r *HostedControlPlaneReconciler) reconcileClusterVersionOperator(ctx conte
}
}

var (
controlPlaneReleaseImage string
dataPlaneReleaseImage string
)
// The CVO prepare-payload script needs the ReleaseImage digest for disconnected environments
pullSecret := common.PullSecret(hcp.Namespace)
if err := r.Get(ctx, client.ObjectKeyFromObject(pullSecret), pullSecret); err != nil {
return fmt.Errorf("failed to get pull secret for namespace %s: %w", hcp.Namespace, err)
}

cpRef, err := registryclient.GetCorrectArchImage(ctx, "cluster-version-operator", p.ControlPlaneImage, pullSecret.Data[corev1.DockerConfigJsonKey], r.ImageMetadataProvider)
if err != nil {
return fmt.Errorf("failed to parse control plane release image %s: %w", cpRef, err)
}

_, cpReleaseImageRef, err := r.ImageMetadataProvider.GetDigest(ctx, cpRef, pullSecret.Data[corev1.DockerConfigJsonKey])
if err != nil {
return fmt.Errorf("failed to get control plane release image digest %s: %w", cpRef, err)
}

controlPlaneReleaseImage = fmt.Sprintf("%s/%s/%s", cpReleaseImageRef.Registry, cpReleaseImageRef.Namespace, cpReleaseImageRef.NameString())

if p.ControlPlaneImage != hcp.Spec.ReleaseImage {
dpRef, err := registryclient.GetCorrectArchImage(ctx, "cluster-version-operator", hcp.Spec.ReleaseImage, pullSecret.Data[corev1.DockerConfigJsonKey], r.ImageMetadataProvider)
if err != nil {
return fmt.Errorf("failed to parse data plane release image %s: %w", dpRef, err)
}

_, dpReleaseImageRef, err := r.ImageMetadataProvider.GetDigest(ctx, dpRef, pullSecret.Data[corev1.DockerConfigJsonKey])
if err != nil {
return fmt.Errorf("failed to get data plane release image digest %s: %w", dpRef, err)
}

dataPlaneReleaseImage = fmt.Sprintf("%s/%s/%s", dpReleaseImageRef.Registry, dpReleaseImageRef.Namespace, dpReleaseImageRef.NameString())
} else {
dataPlaneReleaseImage = controlPlaneReleaseImage
}

deployment := manifests.ClusterVersionOperatorDeployment(hcp.Namespace)
if _, err := createOrUpdate(ctx, r, deployment, func() error {
return cvo.ReconcileDeployment(deployment, p.OwnerRef, p.DeploymentConfig, p.ControlPlaneImage, p.ReleaseImage, p.CLIImage, p.AvailabilityProberImage, p.ClusterID, hcp.Spec.UpdateService, p.PlatformType, util.HCPOAuthEnabled(hcp), r.EnableCVOManagementClusterMetricsAccess, p.FeatureSet)
return cvo.ReconcileDeployment(deployment, p.OwnerRef, p.DeploymentConfig, controlPlaneReleaseImage, dataPlaneReleaseImage, p.CLIImage, p.AvailabilityProberImage, p.ClusterID, hcp.Spec.UpdateService, p.PlatformType, util.HCPOAuthEnabled(hcp), r.EnableCVOManagementClusterMetricsAccess, p.FeatureSet)
}); err != nil {
return fmt.Errorf("failed to reconcile cluster version operator deployment: %w", err)
}
@@ -3939,7 +3978,7 @@ func (r *HostedControlPlaneReconciler) reconcileOperatorLifecycleManager(ctx con

var getCatalogImagesErr error
olmCatalogImagesOnce.Do(func() {
catalogImages, err = olm.GetCatalogImages(ctx, *hcp, pullSecret.Data[corev1.DockerConfigJsonKey], registryclient.GetListDigest)
catalogImages, err = olm.GetCatalogImages(ctx, *hcp, pullSecret.Data[corev1.DockerConfigJsonKey], registryclient.GetListDigest, r.ImageMetadataProvider)
if err != nil {
getCatalogImagesErr = err
return
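
The net effect of the digest-resolution block above is that the CVO deployment is rendered with release images pinned by digest rather than by tag, which is what a disconnected (IPv6) mirror registry can actually serve. A rough illustration of the reference that `fmt.Sprintf("%s/%s/%s", ref.Registry, ref.Namespace, ref.NameString())` yields, using invented values:

```go
// Hypothetical values; only the formatting mirrors the reconciler code above.
package main

import "fmt"

func main() {
	registry := "registry.hypershift.local:5000" // assumed mirror registry
	namespace := "openshift-release-dev"         // assumed namespace
	nameString := "ocp-release@sha256:0123abcd"  // NameString() includes the digest
	fmt.Printf("%s/%s/%s\n", registry, namespace, nameString)
	// prints: registry.hypershift.local:5000/openshift-release-dev/ocp-release@sha256:0123abcd
}
```
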
@@ -1817,5 +1817,14 @@ func componentsFakeDependencies(componentName string, namespace string) []client
fakeComponents = append(fakeComponents, fakeComponentTemplate.DeepCopy())
}

pullSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "pull-secret", Namespace: "hcp-namespace"},
Data: map[string][]byte{
corev1.DockerConfigJsonKey: []byte(`{}`),
},
}

fakeComponents = append(fakeComponents, pullSecret.DeepCopy())

return fakeComponents
}
@@ -122,9 +122,9 @@ func findTagReference(tags []imagev1.TagReference, name string) *imagev1.TagRefe
return nil
}

func GetCatalogImages(ctx context.Context, hcp hyperv1.HostedControlPlane, pullSecret []byte, digestLister registryclient.DigestListerFN) (map[string]string, error) {
func GetCatalogImages(ctx context.Context, hcp hyperv1.HostedControlPlane, pullSecret []byte, digestLister registryclient.DigestListerFN, imageMetadataProvider util.ImageMetadataProvider) (map[string]string, error) {
imageRef := hcp.Spec.ReleaseImage
imageConfig, _, _, err := registryclient.GetMetadata(ctx, imageRef, pullSecret)
imageConfig, _, _, err := imageMetadataProvider.GetMetadata(ctx, imageRef, pullSecret)
if err != nil {
return nil, fmt.Errorf("failed to get image metadata: %w", err)
}
@@ -284,6 +284,7 @@ spec:
annotations:
include.release.openshift.io/ibm-cloud-managed: "true"
release.openshift.io/delete: "true"
EOF
command:
- /bin/bash
image: cluster-version-operator
@@ -8,9 +8,11 @@ import (
configv1 "github.com/openshift/api/config/v1"

hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common"
hyperapi "github.com/openshift/hypershift/support/api"
"github.com/openshift/hypershift/support/config"
component "github.com/openshift/hypershift/support/controlplane-component"
"github.com/openshift/hypershift/support/releaseinfo/registryclient"
"github.com/openshift/hypershift/support/util"

appsv1 "k8s.io/api/apps/v1"
@@ -37,11 +39,18 @@ func (cvo *clusterVersionOperator) adaptDeployment(cpContext component.WorkloadC
featureSet = cpContext.HCP.Spec.Configuration.FeatureGate.FeatureSet
}

// The CVO prepare-payload script needs the ReleaseImage digest for disconnected environments
controlPlaneReleaseImage, dataPlaneReleaseImage, err := discoverCVOReleaseImages(cpContext)
if err != nil {
return fmt.Errorf("failed to discover CVO release images: %w", err)
}

util.UpdateContainer("prepare-payload", deployment.Spec.Template.Spec.InitContainers, func(c *corev1.Container) {
c.Args = []string{
"-c",
preparePayloadScript(cpContext.HCP.Spec.Platform.Type, util.HCPOAuthEnabled(cpContext.HCP), featureSet),
}
c.Image = controlPlaneReleaseImage
})
util.UpdateContainer("bootstrap", deployment.Spec.Template.Spec.InitContainers, func(c *corev1.Container) {
c.Env = append(c.Env, corev1.EnvVar{
@@ -53,7 +62,7 @@ func (cvo *clusterVersionOperator) adaptDeployment(cpContext component.WorkloadC
util.UpdateContainer(ComponentName, deployment.Spec.Template.Spec.Containers, func(c *corev1.Container) {
util.UpsertEnvVar(c, corev1.EnvVar{
Name: "RELEASE_IMAGE",
Value: cpContext.HCP.Spec.ReleaseImage,
Value: dataPlaneReleaseImage,
})

if updateService := cpContext.HCP.Spec.UpdateService; updateService != "" {
@@ -210,6 +219,7 @@ func preparePayloadScript(platformType hyperv1.PlatformType, oauthEnabled bool,
" release.openshift.io/delete: \"true\"",
)
}
stmts = append(stmts, "EOF")
return strings.Join(stmts, "\n")
}

@@ -239,3 +249,45 @@ func resourcesToRemove(platformType hyperv1.PlatformType) []client.Object {
}
}
}

func discoverCVOReleaseImages(cpContext component.WorkloadContext) (string, string, error) {
var (
controlPlaneReleaseImage string
dataPlaneReleaseImage string
)

pullSecret := common.PullSecret(cpContext.HCP.Namespace)
if err := cpContext.Client.Get(cpContext.Context, client.ObjectKeyFromObject(pullSecret), pullSecret); err != nil {
return "", "", fmt.Errorf("failed to get pull secret for namespace %s: %w", cpContext.HCP.Namespace, err)
}

cpRef, err := registryclient.GetCorrectArchImage(cpContext.Context, "cluster-version-operator", cpContext.HCP.Spec.ReleaseImage, pullSecret.Data[corev1.DockerConfigJsonKey], cpContext.ImageMetadataProvider)
if err != nil {
return "", "", fmt.Errorf("failed to parse control plane release image %s: %w", cpRef, err)
}

_, cpReleaseImageRef, err := cpContext.ImageMetadataProvider.GetDigest(cpContext.Context, cpRef, pullSecret.Data[corev1.DockerConfigJsonKey])
if err != nil {
return "", "", fmt.Errorf("failed to get control plane release image digest %s: %w", cpRef, err)
}

controlPlaneReleaseImage = fmt.Sprintf("%s/%s/%s", cpReleaseImageRef.Registry, cpReleaseImageRef.Namespace, cpReleaseImageRef.NameString())

if cpContext.HCP.Spec.ControlPlaneReleaseImage != nil && *cpContext.HCP.Spec.ControlPlaneReleaseImage != cpContext.HCP.Spec.ReleaseImage {
dpRef, err := registryclient.GetCorrectArchImage(cpContext.Context, "cluster-version-operator", cpContext.HCP.Spec.ReleaseImage, pullSecret.Data[corev1.DockerConfigJsonKey], cpContext.ImageMetadataProvider)
if err != nil {
return "", "", fmt.Errorf("failed to parse data plane release image %s: %w", dpRef, err)
}

_, dpReleaseImageRef, err := cpContext.ImageMetadataProvider.GetDigest(cpContext.Context, dpRef, pullSecret.Data[corev1.DockerConfigJsonKey])
if err != nil {
return "", "", fmt.Errorf("failed to get data plane release image digest %s: %w", dpRef, err)
}

dataPlaneReleaseImage = fmt.Sprintf("%s/%s/%s", dpReleaseImageRef.Registry, dpReleaseImageRef.Namespace, dpReleaseImageRef.NameString())
} else {
dataPlaneReleaseImage = controlPlaneReleaseImage
}

return controlPlaneReleaseImage, dataPlaneReleaseImage, nil
}
5 changes: 5 additions & 0 deletions control-plane-operator/hostedclusterconfigoperator/cmd.go
@@ -259,6 +259,10 @@ func (o *HostedClusterConfigOperator) Run(ctx context.Context) error {
OpenShiftImageRegistryOverrides: imageRegistryOverrides,
}

imageMetaDataProvider := &util.RegistryClientImageMetadataProvider{
OpenShiftImageRegistryOverrides: imageRegistryOverrides,
}

apiReadingClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: hyperapi.Scheme})
if err != nil {
return fmt.Errorf("failed to construct api reading client: %w", err)
@@ -299,6 +303,7 @@ func (o *HostedClusterConfigOperator) Run(ctx context.Context) error {
OAuthPort: o.OAuthPort,
OperateOnReleaseImage: os.Getenv("OPERATE_ON_RELEASE_IMAGE"),
EnableCIDebugOutput: o.enableCIDebugOutput,
ImageMetaDataProvider: *imageMetaDataProvider,
}
configmetrics.Register(mgr.GetCache())
return operatorConfig.Start(ctx)
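
The provider constructed in this cmd.go hunk (and mirrored by the reconciler test further down) carries OpenShiftImageRegistryOverrides, so digest and metadata lookups presumably follow the cluster's configured image mirrors in disconnected setups. A sketch of that wiring; the struct and field names come from the hunks in this PR, while the override key/value semantics (source repo mapped to mirror locations) and the mirror itself are assumptions:

```go
// Sketch, assuming the override map is keyed by source image repository and
// valued with mirror locations; the mirror below is invented.
package main

import (
	"fmt"

	"github.com/openshift/hypershift/support/util"
)

func main() {
	provider := &util.RegistryClientImageMetadataProvider{
		OpenShiftImageRegistryOverrides: map[string][]string{
			"quay.io/openshift-release-dev/ocp-release": {
				"registry.hypershift.local:5000/openshift-release-dev/ocp-release",
			},
		},
	}
	fmt.Printf("%T configured with %d override(s)\n", provider, len(provider.OpenShiftImageRegistryOverrides))
}
```
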
@@ -3,9 +3,11 @@ package olm
import (
"context"
"fmt"

hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/olm"
"github.com/openshift/hypershift/support/releaseinfo/registryclient"
"github.com/openshift/hypershift/support/util"
corev1 "k8s.io/api/core/v1"
)

@@ -17,8 +19,8 @@ type OperatorLifecycleManagerParams struct {
OLMCatalogPlacement hyperv1.OLMCatalogPlacement
}

func NewOperatorLifecycleManagerParams(ctx context.Context, hcp *hyperv1.HostedControlPlane, pullSecret *corev1.Secret, digestLister registryclient.DigestListerFN) (*OperatorLifecycleManagerParams, error) {
catalogImages, err := olm.GetCatalogImages(ctx, *hcp, pullSecret.Data[corev1.DockerConfigJsonKey], digestLister)
func NewOperatorLifecycleManagerParams(ctx context.Context, hcp *hyperv1.HostedControlPlane, pullSecret *corev1.Secret, digestLister registryclient.DigestListerFN, imageMetadataProvider util.ImageMetadataProvider) (*OperatorLifecycleManagerParams, error) {
catalogImages, err := olm.GetCatalogImages(ctx, *hcp, pullSecret.Data[corev1.DockerConfigJsonKey], digestLister, imageMetadataProvider)
if err != nil {
return nil, fmt.Errorf("failed to get catalog images: %w", err)
}
@@ -131,6 +131,7 @@ type reconciler struct {
oauthPort int32
versions map[string]string
operateOnReleaseImage string
ImageMetaDataProvider util.RegistryClientImageMetadataProvider
registryclient.DigestListerFN
}

@@ -190,6 +191,7 @@ func Setup(ctx context.Context, opts *operator.HostedClusterConfigOperatorConfig
versions: opts.Versions,
operateOnReleaseImage: opts.OperateOnReleaseImage,
DigestListerFN: registryclient.GetListDigest,
ImageMetaDataProvider: opts.ImageMetaDataProvider,
}})
if err != nil {
return fmt.Errorf("failed to construct controller: %w", err)
@@ -1687,7 +1689,7 @@ func (r *reconciler) reconcileOLM(ctx context.Context, hcp *hyperv1.HostedContro

olmCatalogImagesOnce.Do(func() {
var err error
p, err = olm.NewOperatorLifecycleManagerParams(ctx, hcp, pullSecret, digestLister)
p, err = olm.NewOperatorLifecycleManagerParams(ctx, hcp, pullSecret, digestLister, &r.ImageMetaDataProvider)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create OperatorLifecycleManagerParams: %w", err))
return
@@ -141,6 +141,9 @@ func TestReconcileErrorHandling(t *testing.T) {
fakeDigestLister := func(ctx context.Context, image string, pullSecret []byte) (digest.Digest, error) {
return "", nil
}
imageMetaDataProvider := supportutil.RegistryClientImageMetadataProvider{
OpenShiftImageRegistryOverrides: map[string][]string{},
}

r := &reconciler{
client: fakeClient,
@@ -153,6 +156,7 @@ hcpNamespace: "bar",
hcpNamespace: "bar",
releaseProvider: &fakereleaseprovider.FakeReleaseProvider{},
DigestListerFN: fakeDigestLister,
ImageMetaDataProvider: imageMetaDataProvider,
}
_, err := r.Reconcile(context.Background(), controllerruntime.Request{})
if err != nil {