Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
1218e04
Remove Marketplace (#2455)
camilamacedo86 Jan 21, 2026
6e4f192
🐛 Workload should still be resilient when catalog is deleted (#2439)
camilamacedo86 Jan 21, 2026
8c575d8
Merge branch 'main' into synchronize
Jan 22, 2026
2a87b10
UPSTREAM: <carry>: Add OpenShift specific files
dtfranz Oct 26, 2023
a924dc5
UPSTREAM: <carry>: Add new tests for single/own namespaces install modes
camilamacedo86 Oct 6, 2025
6e86e9d
UPSTREAM: <carry>: Upgrade OCP image from 4.20 to 4.21
camilamacedo86 Oct 13, 2025
4203550
UPSTREAM: <carry>: [Default Catalog Tests] - Change logic to get ocp …
camilamacedo86 Oct 13, 2025
081b540
UPSTREAM: <carry>: Update OCP catalogs to v4.21
tmshort Oct 13, 2025
7c5dc27
UPSTREAM: <carry>: support singleown cases in disconnected
kuiwang02 Oct 16, 2025
60cea82
UPSTREAM: <carry>: fix cases 81696 and 74618 for product code changes
kuiwang02 Oct 17, 2025
1e23b40
UPSTREAM: <carry>: Define Default timeouts and apply their usage accr…
camilamacedo86 Oct 22, 2025
98678e9
UPSTREAM: <carry>: Update to new feature-gate options in helm
tmshort Oct 22, 2025
a07b36b
UPSTREAM: <carry>: Fix flake for single/own ns tests by ensuring uniq…
camilamacedo86 Oct 22, 2025
5a0e8d8
UPSTREAM: <carry>: [OTE]: Enhance single/own ns based on review comme…
camilamacedo86 Oct 24, 2025
261a602
UPSTREAM: <carry>: Update OwnSingle template to use spec.config.inlin…
kuiwang02 Nov 3, 2025
b53bc56
UPSTREAM: <carry>: [OTE]: Add webhook cleanup validation on extension…
camilamacedo86 Nov 4, 2025
1591dc3
UPSTREAM: <carry>: Add [OTP] to migrated cases
kuiwang02 Nov 7, 2025
9c5bdea
UPSTREAM: <carry>: [OTE]: Upgrade dependencies used
camilamacedo86 Nov 5, 2025
01eaf3e
UPSTREAM: <carry>: fix(OTE): fix OpenShift Kubernetes replace version…
camilamacedo86 Nov 10, 2025
e968860
UPSTREAM: <carry>: [Default Catalog Tests] Upgrade go 1.24.6 and depe…
camilamacedo86 Nov 11, 2025
ff81893
UPSTREAM: <carry>: add disconnected environment support with custom p…
kuiwang02 Nov 12, 2025
32f703f
UPSTREAM: <carry>: migrate jiazha test cases to OTE
jianzhangbjz Nov 14, 2025
679830f
UPSTREAM: <carry>: migrate clustercatalog case to ote
Xia-Zhao-rh Oct 17, 2025
2e52cc0
UPSTREAM: <carry>: migrate olmv1 QE stress cases
kuiwang02 Nov 20, 2025
62ffb4f
UPSTREAM: <carry>: Use busybox/httpd to simulate probes
tmshort Nov 25, 2025
4684599
UPSTREAM: <carry>: migrate olmv1 QE cases
Xia-Zhao-rh Nov 25, 2025
aca3614
UPSTREAM: <carry>: add agent for olmv1 qe cases
kuiwang02 Oct 21, 2025
afa3cf3
UPSTREAM: <carry>: Disable upstream PodDisruptionBudget
tmshort Dec 3, 2025
10b91bf
UPSTREAM: <carry>: Add AGENTS.md for AI code contributions
rashmigottipati Dec 11, 2025
4cfaae8
UPSTREAM: <carry>: address review comments through addl prompts
rashmigottipati Dec 11, 2025
739fe9d
UPSTREAM: <carry>: addressing some more review comments
rashmigottipati Dec 11, 2025
0047415
UPSTREAM: <carry>: remove DCO line
rashmigottipati Dec 11, 2025
79b0f29
UPSTREAM: <carry>: migrate bandrade test cases to OTE
bandrade Nov 18, 2025
7e51ad9
UPSTREAM: <carry>: update metadata
bandrade Dec 3, 2025
f2e84b4
UPSTREAM: <carry>: remove originalName
bandrade Dec 3, 2025
405e547
UPSTREAM: <carry>: update 80458's timeout to 180s
jianzhangbjz Dec 8, 2025
1553c8b
UPSTREAM: <carry>: update 83026 to specify the clustercatalog
jianzhangbjz Dec 15, 2025
5090503
UPSTREAM: <carry>: Update to golang 1.25 and ocp 4.22
oceanc80 Dec 18, 2025
45df204
UPSTREAM: <carry>: Use oc client for running e2e tests
pedjak Jan 13, 2026
09c1ca6
UPSTREAM: <carry>: Run upstream e2e tests tagged with `@catalogd-update`
pedjak Jan 14, 2026
25ff935
UPSTREAM: <carry>: enhance case to make it more stable
kuiwang02 Jan 6, 2026
e728e7d
UPSTREAM: <carry>: add service account to curl job
ehearne-redhat Jan 7, 2026
1989a06
UPSTREAM: <carry>: move sa creation out of buildCurlJob()
ehearne-redhat Jan 8, 2026
64efd75
UPSTREAM: <carry>: comment out delete service account
ehearne-redhat Jan 9, 2026
72ef65c
UPSTREAM: <carry>: move defercleanup for sa for LIFO
ehearne-redhat Jan 9, 2026
a59652f
UPSTREAM: <carry>: add polling so job fully deleted before proceed
ehearne-redhat Jan 12, 2026
d7f1ffc
UPSTREAM: <carry>: Revert "Merge pull request #594 from ehearne-redha…
sosiouxme Jan 20, 2026
f61c0c0
UPSTREAM: <carry>: Remove openshift-redhat-marketplace catalog tests
camilamacedo86 Jan 8, 2026
c6ae7c2
UPSTREAM: <drop>: go mod vendor
Jan 22, 2026
f2c8e16
UPSTREAM: <drop>: remove upstream GitHub configuration
Jan 22, 2026
029c34e
UPSTREAM: <drop>: configure the commit-checker
Jan 22, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions cmd/operator-controller/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -629,7 +629,7 @@ func (c *boxcutterReconcilerConfigurator) Configure(ceReconciler *controllers.Cl
controllers.HandleFinalizers(c.finalizers),
controllers.MigrateStorage(storageMigrator),
controllers.RetrieveRevisionStates(revisionStatesGetter),
controllers.ResolveBundle(c.resolver),
controllers.ResolveBundle(c.resolver, c.mgr.GetClient()),
controllers.UnpackBundle(c.imagePuller, c.imageCache),
controllers.ApplyBundleWithBoxcutter(appl.Apply),
}
Expand Down Expand Up @@ -748,7 +748,7 @@ func (c *helmReconcilerConfigurator) Configure(ceReconciler *controllers.Cluster
ceReconciler.ReconcileSteps = []controllers.ReconcileStepFunc{
controllers.HandleFinalizers(c.finalizers),
controllers.RetrieveRevisionStates(revisionStatesGetter),
controllers.ResolveBundle(c.resolver),
controllers.ResolveBundle(c.resolver, c.mgr.GetClient()),
controllers.UnpackBundle(c.imagePuller, c.imageCache),
controllers.ApplyBundle(appl),
}
Expand Down
2 changes: 1 addition & 1 deletion commitchecker.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
expectedMergeBase: 8167ff8fd3ab3880459f9d1e5626c6e3073e3428
expectedMergeBase: 6e4f192699f5c039fa2b92b01372a150274447bd
upstreamBranch: main
upstreamOrg: operator-framework
upstreamRepo: operator-controller

This file was deleted.

47 changes: 32 additions & 15 deletions internal/operator-controller/applier/boxcutter.go
Original file line number Diff line number Diff line change
Expand Up @@ -312,21 +312,38 @@ func (bc *Boxcutter) createOrUpdate(ctx context.Context, user user.Info, rev *oc
return bc.Client.Patch(ctx, rev, client.Apply, client.FieldOwner(bc.FieldOwner), client.ForceOwnership)
}

func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) error {
// Generate desired revision
desiredRevision, err := bc.RevisionGenerator.GenerateRevision(ctx, contentFS, ext, objectLabels, revisionAnnotations)
func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (bool, string, error) {
// List existing revisions first to validate cluster connectivity before checking contentFS.
// This ensures we fail fast on API errors rather than attempting fallback behavior when
// cluster access is unavailable (since the ClusterExtensionRevision controller also requires
// API access to maintain resources). The revision list is also needed to determine if fallback
// is possible when contentFS is nil (at least one revision must exist).
existingRevisions, err := bc.getExistingRevisions(ctx, ext.GetName())
if err != nil {
return err
return false, "", err
}

if err := controllerutil.SetControllerReference(ext, desiredRevision, bc.Scheme); err != nil {
return fmt.Errorf("set ownerref: %w", err)
// If contentFS is nil, we're maintaining the current state without catalog access.
// In this case, we should use the existing installed revision without generating a new one.
if contentFS == nil {
if len(existingRevisions) == 0 {
return false, "", fmt.Errorf("catalog content unavailable and no revision installed")
}
// Returning true here signals that the rollout has succeeded using the current revision.
// This assumes the ClusterExtensionRevision controller is running and will continue to
// reconcile, apply, and maintain the resources defined in that revision via Server-Side Apply,
// ensuring the workload keeps running even when catalog access is unavailable.
return true, "", nil
}

// List all existing revisions
existingRevisions, err := bc.getExistingRevisions(ctx, ext.GetName())
// Generate desired revision
desiredRevision, err := bc.RevisionGenerator.GenerateRevision(ctx, contentFS, ext, objectLabels, revisionAnnotations)
if err != nil {
return err
return false, "", err
}

if err := controllerutil.SetControllerReference(ext, desiredRevision, bc.Scheme); err != nil {
return false, "", fmt.Errorf("set ownerref: %w", err)
}

currentRevision := &ocv1.ClusterExtensionRevision{}
Expand All @@ -348,7 +365,7 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust
// inplace patch was successful, no changes in phases
state = StateUnchanged
default:
return fmt.Errorf("patching %s Revision: %w", desiredRevision.Name, err)
return false, "", fmt.Errorf("patching %s Revision: %w", desiredRevision.Name, err)
}
}

Expand All @@ -362,7 +379,7 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust
case StateNeedsInstall:
err := preflight.Install(ctx, plainObjs)
if err != nil {
return err
return false, "", err
}
// TODO: jlanford's IDE says that "StateNeedsUpgrade" condition is always true, but
// it isn't immediately obvious why that is. Perhaps len(existingRevisions) is
Expand All @@ -371,7 +388,7 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust
case StateNeedsUpgrade:
err := preflight.Upgrade(ctx, plainObjs)
if err != nil {
return err
return false, "", err
}
}
}
Expand All @@ -385,15 +402,15 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust
desiredRevision.Spec.Revision = revisionNumber

if err = bc.garbageCollectOldRevisions(ctx, prevRevisions); err != nil {
return fmt.Errorf("garbage collecting old revisions: %w", err)
return false, "", fmt.Errorf("garbage collecting old revisions: %w", err)
}

if err := bc.createOrUpdate(ctx, getUserInfo(ext), desiredRevision); err != nil {
return fmt.Errorf("creating new Revision: %w", err)
return false, "", fmt.Errorf("creating new Revision: %w", err)
}
}

return nil
return true, "", nil
}

// runPreAuthorizationChecks runs PreAuthorization checks if the PreAuthorizer is set. An error will be returned if
Expand Down
10 changes: 8 additions & 2 deletions internal/operator-controller/applier/boxcutter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -991,14 +991,18 @@ func TestBoxcutter_Apply(t *testing.T) {
labels.PackageNameKey: "test-package",
}
}
err := boxcutter.Apply(t.Context(), testFS, ext, nil, revisionAnnotations)
completed, status, err := boxcutter.Apply(t.Context(), testFS, ext, nil, revisionAnnotations)

// Assert
if tc.expectedErr != "" {
require.Error(t, err)
assert.Contains(t, err.Error(), tc.expectedErr)
assert.False(t, completed)
assert.Empty(t, status)
} else {
require.NoError(t, err)
assert.True(t, completed)
assert.Empty(t, status)
}

if tc.validate != nil {
Expand Down Expand Up @@ -1190,10 +1194,12 @@ func Test_PreAuthorizer_Integration(t *testing.T) {
RevisionGenerator: dummyGenerator,
PreAuthorizer: tc.preAuthorizer(t),
}
err := boxcutter.Apply(t.Context(), dummyBundleFs, ext, nil, revisionAnnotations)
completed, status, err := boxcutter.Apply(t.Context(), dummyBundleFs, ext, nil, revisionAnnotations)
if tc.validate != nil {
tc.validate(t, err)
}
_ = completed
_ = status
})
}
}
Expand Down
66 changes: 66 additions & 0 deletions internal/operator-controller/applier/helm.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,16 @@ func (h *Helm) runPreAuthorizationChecks(ctx context.Context, ext *ocv1.ClusterE
}

func (h *Helm) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels map[string]string, storageLabels map[string]string) (bool, string, error) {
// If contentFS is nil, we're maintaining the current state without catalog access.
// In this case, reconcile the existing Helm release if it exists.
if contentFS == nil {
ac, err := h.ActionClientGetter.ActionClientFor(ctx, ext)
if err != nil {
return false, "", err
}
return h.reconcileExistingRelease(ctx, ac, ext)
}

chrt, err := h.buildHelmChart(contentFS, ext)
if err != nil {
return false, "", err
Expand Down Expand Up @@ -178,6 +188,62 @@ func (h *Helm) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExte
return true, "", nil
}

// reconcileExistingRelease reconciles an existing Helm release without catalog access.
// This is used when the catalog is unavailable but we need to maintain the current installation.
// It reconciles the release to actively maintain resources, and sets up watchers for monitoring/observability.
// reconcileExistingRelease reconciles an existing Helm release without catalog access.
// This is used when the catalog is unavailable but we need to maintain the current installation.
// It reconciles the release to actively maintain resources, and sets up watchers for monitoring/observability.
//
// Returns (true, "", nil) when the release has been reconciled (even if drift-detection
// watch setup failed — that failure is only logged), and (false, "", err) when no release
// exists or the reconcile itself failed.
func (h *Helm) reconcileExistingRelease(ctx context.Context, ac helmclient.ActionInterface, ext *ocv1.ClusterExtension) (bool, string, error) {
	rel, err := ac.Get(ext.GetName())
	if errors.Is(err, driver.ErrReleaseNotFound) {
		// Nothing is installed, so there is nothing to maintain without catalog content.
		return false, "", errors.New("catalog content unavailable and no release installed")
	}
	if err != nil {
		return false, "", fmt.Errorf("failed to get current release: %w", err)
	}

	// Reconcile the existing release to ensure resources are maintained
	if err := ac.Reconcile(rel); err != nil {
		// Reconcile failed - resources NOT maintained
		// Return false (rollout failed) with error
		return false, "", err
	}

	// At this point: Reconcile succeeded - resources ARE maintained (applied to cluster via Server-Side Apply)
	// The operations below are for setting up watches to detect drift (i.e., if someone manually modifies the
	// resources). If watch setup fails, the resources are still successfully maintained, but we won't detect
	// and auto-correct manual modifications. We return true (rollout succeeded) and log watch errors.
	logger := klog.FromContext(ctx)

	relObjects, err := util.ManifestObjects(strings.NewReader(rel.Manifest), fmt.Sprintf("%s-release-manifest", rel.Name))
	if err != nil {
		logger.Error(err, "failed to parse manifest objects, cannot set up drift detection watches (resources are applied but drift detection disabled)")
		return true, "", nil
	}

	logger.V(1).Info("setting up drift detection watches on managed objects")

	// Defensive nil checks to prevent panics if Manager or Watcher not properly initialized
	if h.Manager == nil {
		logger.Error(errors.New("manager is nil"), "Manager not initialized, cannot set up drift detection watches (resources are applied but drift detection disabled)")
		return true, "", nil
	}
	cache, err := h.Manager.Get(ctx, ext)
	if err != nil {
		logger.Error(err, "failed to get managed content cache, cannot set up drift detection watches (resources are applied but drift detection disabled)")
		return true, "", nil
	}

	if h.Watcher == nil {
		logger.Error(errors.New("watcher is nil"), "Watcher not initialized, cannot set up drift detection watches (resources are applied but drift detection disabled)")
		return true, "", nil
	}
	if err := cache.Watch(ctx, h.Watcher, relObjects...); err != nil {
		logger.Error(err, "failed to set up drift detection watches (resources are applied but drift detection disabled)")
		return true, "", nil
	}

	return true, "", nil
}

func (h *Helm) buildHelmChart(bundleFS fs.FS, ext *ocv1.ClusterExtension) (*chart.Chart, error) {
if h.HelmChartProvider == nil {
return nil, errors.New("HelmChartProvider is nil")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ func MigrateStorage(m StorageMigrator) ReconcileStepFunc {
}
}

func ApplyBundleWithBoxcutter(apply func(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) error) ReconcileStepFunc {
func ApplyBundleWithBoxcutter(apply func(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (bool, string, error)) ReconcileStepFunc {
return func(ctx context.Context, state *reconcileState, ext *ocv1.ClusterExtension) (*ctrl.Result, error) {
l := log.FromContext(ctx)
revisionAnnotations := map[string]string{
Expand All @@ -109,7 +109,8 @@ func ApplyBundleWithBoxcutter(apply func(ctx context.Context, contentFS fs.FS, e
}

l.Info("applying bundle contents")
if err := apply(ctx, state.imageFS, ext, objLbls, revisionAnnotations); err != nil {
_, _, err := apply(ctx, state.imageFS, ext, objLbls, revisionAnnotations)
if err != nil {
// If there was an error applying the resolved bundle,
// report the error via the Progressing condition.
setStatusProgressing(ext, wrapErrorWithResolutionInfo(state.resolvedRevisionMetadata.BundleMetadata, err))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,8 +133,8 @@ func TestApplyBundleWithBoxcutter(t *testing.T) {
imageFS: fstest.MapFS{},
}

stepFunc := ApplyBundleWithBoxcutter(func(_ context.Context, _ fs.FS, _ *ocv1.ClusterExtension, _, _ map[string]string) error {
return nil
stepFunc := ApplyBundleWithBoxcutter(func(_ context.Context, _ fs.FS, _ *ocv1.ClusterExtension, _, _ map[string]string) (bool, string, error) {
return true, "", nil
})
result, err := stepFunc(ctx, state, ext)
require.NoError(t, err)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,9 @@ import (
)

func TestClusterExtensionSourceConfig(t *testing.T) {
sourceTypeEmptyError := "Invalid value: null"
// NOTE: Kubernetes validation error format for JSON null values varies across K8s versions.
// We check for the common part "Invalid value:" which appears in all versions.
sourceTypeEmptyError := "Invalid value:"
sourceTypeMismatchError := "spec.source.sourceType: Unsupported value"
sourceConfigInvalidError := "spec.source: Invalid value"
// unionField represents the required Catalog or (future) Bundle field required by SourceConfig
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,8 @@ func (r *ClusterExtensionReconciler) Reconcile(ctx context.Context, req ctrl.Req

// ensureAllConditionsWithReason checks that all defined condition types exist in the given ClusterExtension,
// and assigns a specified reason and custom message to any missing condition.
//
//nolint:unparam // reason parameter is designed to be flexible, even if current callers use the same value
func ensureAllConditionsWithReason(ext *ocv1.ClusterExtension, reason v1alpha1.ConditionReason, message string) {
for _, condType := range conditionsets.ConditionTypes {
cond := apimeta.FindStatusCondition(ext.Status.Conditions, condType)
Expand Down
Loading