diff --git a/.gomodcheck.yaml b/.gomodcheck.yaml
index 3608de331d..3eaff8dc47 100644
--- a/.gomodcheck.yaml
+++ b/.gomodcheck.yaml
@@ -9,6 +9,10 @@ upstreamRefs:
   # k8s.io/utils -> conflicts with k/k deps
 
 excludedModules:
+  # Needs a newer version to fix https://github.com/kubernetes-sigs/controller-runtime/issues/3418
+  # This should not be needed by the time we update to 1.36
+  - sigs.k8s.io/structured-merge-diff/v6
+
   # --- test dependencies:
   - github.com/onsi/ginkgo/v2
   - github.com/onsi/gomega
diff --git a/examples/scratch-env/go.mod b/examples/scratch-env/go.mod
index 06f58bb989..523ec66f7d 100644
--- a/examples/scratch-env/go.mod
+++ b/examples/scratch-env/go.mod
@@ -61,7 +61,7 @@ require (
 	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
 	sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
 )
diff --git a/examples/scratch-env/go.sum b/examples/scratch-env/go.sum
index ef2cb38f06..4db6f11f06 100644
--- a/examples/scratch-env/go.sum
+++ b/examples/scratch-env/go.sum
@@ -164,7 +164,7 @@ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5E
 sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
 sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
 sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/go.mod b/go.mod
index b06164f275..eb5adfa10e 100644
--- a/go.mod
+++ b/go.mod
@@ -28,7 +28,7 @@ require (
 	k8s.io/client-go v0.35.0
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
-	sigs.k8s.io/structured-merge-diff/v6 v6.3.0
+	sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482
 	sigs.k8s.io/yaml v1.6.0
 )
diff --git a/go.sum b/go.sum
index 938f89efc2..9bd1f231d8 100644
--- a/go.sum
+++ b/go.sum
@@ -247,7 +247,7 @@ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5E
 sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
 sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
 sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go
index a95836b90d..a5ce56de22 100644
--- a/pkg/cache/cache_test.go
+++ b/pkg/cache/cache_test.go
@@ -24,6 +24,8 @@ import (
 	"slices"
 	"strconv"
 	"strings"
+	"testing"
+	"testing/synctest"
 	"time"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -2588,3 +2590,70 @@ func cancelledCtx(ctx context.Context) context.Context {
 	cancel()
 	return cancelCtx
 }
+
+type fakeRESTMapper struct {
+	meta.RESTMapper
+}
+
+func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	return &meta.RESTMapping{Scope: meta.RESTScopeNamespace}, nil
+}
+
+func TestReaderWaitsForCacheSync(t *testing.T) {
+	t.Parallel()
+	for _, readerFailOnMissingInformer := range []bool{true, false} {
+		t.Run(fmt.Sprintf("ReaderFailOnMissingInformer=%v", readerFailOnMissingInformer), func(t *testing.T) {
+			t.Parallel()
+			synctest.Test(t, func(t *testing.T) {
+				g := NewWithT(t)
+
+				fakeInformer := &controllertest.FakeInformer{Synced: false}
+				c, err := cache.New(&rest.Config{}, cache.Options{
+					ReaderFailOnMissingInformer: readerFailOnMissingInformer,
+					Mapper:                      &fakeRESTMapper{},
+					NewInformer: func(kcache.ListerWatcher, runtime.Object, time.Duration, kcache.Indexers) kcache.SharedIndexInformer {
+						return fakeInformer
+					},
+				})
+				g.Expect(err).NotTo(HaveOccurred())
+
+				ctx, cancel := context.WithCancel(t.Context())
+				defer cancel()
+				cacheDone := make(chan struct{})
+				go func() {
+					g.Expect(c.Start(ctx)).To(Succeed())
+					close(cacheDone)
+				}()
+				synctest.Wait() // Let the cache finish starting
+
+				_, err = c.GetInformer(ctx, &corev1.Service{}, cache.BlockUntilSynced(false))
+				g.Expect(err).ToNot(HaveOccurred())
+
+				listCtx, listCtxCancel := context.WithTimeout(ctx, time.Second)
+				defer listCtxCancel()
+				services := &corev1.ServiceList{}
+				err = c.List(listCtx, services)
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(apierrors.IsTimeout(err)).To(BeTrue())
+
+				getCtx, getCtxCancel := context.WithTimeout(ctx, time.Second)
+				defer getCtxCancel()
+				err = c.Get(getCtx, client.ObjectKey{Name: "default", Namespace: "kubernetes"}, &corev1.Service{})
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(apierrors.IsTimeout(err)).To(BeTrue())
+
+				fakeInformer.SyncedLock.Lock()
+				fakeInformer.Synced = true
+				fakeInformer.SyncedLock.Unlock()
+
+				g.Expect(c.List(ctx, services)).To(Succeed())
+
+				err = c.Get(getCtx, client.ObjectKey{Name: "default", Namespace: "kubernetes"}, &corev1.Service{})
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
+				cancel()
+				<-cacheDone
+			})
+		})
+	}
+}
diff --git a/pkg/cache/informer_cache.go b/pkg/cache/informer_cache.go
index 5f0d88fdb0..50dd9a8be1 100644
--- a/pkg/cache/informer_cache.go
+++ b/pkg/cache/informer_cache.go
@@ -51,16 +51,7 @@ var _ error = (*ErrCacheNotStarted)(nil)
 // ErrResourceNotCached indicates that the resource type
 // the client asked the cache for is not cached, i.e. the
 // corresponding informer does not exist yet.
-type ErrResourceNotCached struct {
-	GVK schema.GroupVersionKind
-}
-
-// Error returns the error
-func (r ErrResourceNotCached) Error() string {
-	return fmt.Sprintf("%s is not cached", r.GVK.String())
-}
-
-var _ error = (*ErrResourceNotCached)(nil)
+type ErrResourceNotCached = internal.ErrResourceNotCached
 
 // informerCache is a Kubernetes Object cache populated from internal.Informers.
 // informerCache wraps internal.Informers.
@@ -157,7 +148,7 @@ func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou
 		return nil, err
 	}
 
-	_, i, err := ic.Informers.Get(ctx, gvk, obj, applyGetOptions(opts...))
+	_, i, err := ic.Informers.Get(ctx, gvk, obj, false, applyGetOptions(opts...))
 	if err != nil {
 		return nil, err
 	}
@@ -171,7 +162,7 @@ func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object, opt
 		return nil, err
 	}
 
-	_, i, err := ic.Informers.Get(ctx, gvk, obj, applyGetOptions(opts...))
+	_, i, err := ic.Informers.Get(ctx, gvk, obj, false, applyGetOptions(opts...))
 	if err != nil {
 		return nil, err
 	}
@@ -179,15 +170,11 @@ func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object, opt
 }
 
 func (ic *informerCache) getInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *internal.Cache, error) {
-	if ic.readerFailOnMissingInformer {
-		cache, started, ok := ic.Informers.Peek(gvk, obj)
-		if !ok {
-			return false, nil, &ErrResourceNotCached{GVK: gvk}
-		}
-		return started, cache, nil
+	started, cache, err := ic.Informers.Get(ctx, gvk, obj, ic.readerFailOnMissingInformer, &internal.GetOptions{})
+	if err != nil {
+		return false, nil, err
 	}
-
-	return ic.Informers.Get(ctx, gvk, obj, &internal.GetOptions{})
+	return started, cache, nil
 }
 
 // RemoveInformer deactivates and removes the informer from the cache.
diff --git a/pkg/cache/internal/informers.go b/pkg/cache/internal/informers.go
index 0f921ef63d..619e36abd3 100644
--- a/pkg/cache/internal/informers.go
+++ b/pkg/cache/internal/informers.go
@@ -45,6 +45,20 @@ import (
 
 var log = logf.RuntimeLog.WithName("cache")
 
+// ErrResourceNotCached indicates that the resource type
+// the client asked the cache for is not cached, i.e. the
+// corresponding informer does not exist yet.
+type ErrResourceNotCached struct {
+	GVK schema.GroupVersionKind
+}
+
+// Error returns the error
+func (r ErrResourceNotCached) Error() string {
+	return fmt.Sprintf("%s is not cached", r.GVK.String())
+}
+
+var _ error = (*ErrResourceNotCached)(nil)
+
 // InformersOpts configures an InformerMap.
 type InformersOpts struct {
 	HTTPClient *http.Client
@@ -294,10 +308,13 @@ func (ip *Informers) Peek(gvk schema.GroupVersionKind, obj runtime.Object) (res
 
 // Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns
 // the Informer from the map.
-func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object, opts *GetOptions) (bool, *Cache, error) {
+func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object, readerFailOnMissingInformer bool, opts *GetOptions) (bool, *Cache, error) {
 	// Return the informer if it is found
 	i, started, ok := ip.Peek(gvk, obj)
 	if !ok {
+		if readerFailOnMissingInformer {
+			return false, nil, &ErrResourceNotCached{GVK: gvk}
+		}
 		var err error
 		if i, started, err = ip.addInformerToMap(gvk, obj); err != nil {
 			return started, nil, err
diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go
index 209ccc67fe..585b74800b 100644
--- a/pkg/client/fake/client_test.go
+++ b/pkg/client/fake/client_test.go
@@ -1834,6 +1834,41 @@ var _ = Describe("Fake client", func() {
 		Expect(initial).To(BeComparableTo(actual))
 	})
 
+	// https://github.com/kubernetes-sigs/controller-runtime/issues/3423
+	It("should be able to status apply existing objects that have managedFields set", func(ctx SpecContext) {
+		cl := NewClientBuilder().WithStatusSubresource(&corev1.Node{}).Build()
+		node := corev1applyconfigurations.Node("a-node").
+			WithSpec(corev1applyconfigurations.NodeSpec().WithPodCIDR("some-value"))
+		Expect(cl.Apply(ctx, node, client.FieldOwner("test-owner"))).To(Succeed())
+
+		node = node.
+			WithStatus(corev1applyconfigurations.NodeStatus().WithPhase(corev1.NodeRunning))
+		Expect(cl.Status().Apply(ctx, node, client.FieldOwner("test-owner"))).To(Succeed())
+	})
+
+	It("should not be able to manually update the fieldManager through a status update", func(ctx SpecContext) {
+		node := &corev1.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "node",
+			},
+			Spec: corev1.NodeSpec{
+				PodCIDR: "old-cidr",
+			},
+		}
+		cl := NewClientBuilder().WithStatusSubresource(&corev1.Node{}).WithObjects(node).WithReturnManagedFields().Build()
+		node.Spec.PodCIDR = "new-cidr"
+		Expect(cl.Update(ctx, node, client.FieldOwner("spec-owner"))).To(Succeed())
+
+		node.ManagedFields = []metav1.ManagedFieldsEntry{{}}
+		node.Status.Phase = corev1.NodeRunning
+
+		Expect(cl.Status().Update(ctx, node, client.FieldOwner("status-owner"))).To(Succeed())
+		Expect(node.ManagedFields).To(HaveLen(2))
+		Expect(node.ManagedFields[0].Manager).To(Equal("spec-owner"))
+		Expect(node.ManagedFields[1].Manager).To(Equal("status-owner"))
+	})
+
 	It("should Unmarshal the schemaless object with int64 to preserve ints", func(ctx SpecContext) {
 		schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}}
 		schemeBuilder.Register(&WithSchemalessSpec{})
@@ -1884,6 +1919,19 @@ var _ = Describe("Fake client", func() {
 		Expect(obj.Spec).To(BeEquivalentTo(spec))
 	})
 
+	It("works with types that have an embedded struct pointer", func(ctx SpecContext) {
+		schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}}
+		schemeBuilder.Register(&EmbeddedPointerStructCRD{})
+
+		scheme := runtime.NewScheme()
+		Expect(schemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred())
+
+		c := NewClientBuilder().WithScheme(scheme).Build()
+
+		object := &EmbeddedPointerStructCRD{ObjectMeta: metav1.ObjectMeta{Name: "eps"}}
+		Expect(c.Create(ctx, object)).NotTo(HaveOccurred())
+	})
+
 	It("should not change the status of unstructured objects that are configured to have a status subresource on update", func(ctx SpecContext) {
 		obj := &unstructured.Unstructured{}
 		obj.SetAPIVersion("foo/v1")
@@ -3371,3 +3419,26 @@ var _ = Describe("Fake client builder", func() {
 		}).To(Panic())
 	})
 })
+
+type EmbeddedPointerStructCRD struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitzero"`
+	*EmbededField     `json:",inline"`
+}
+
+type EmbededField struct {
+	AnInt int `json:"anInt"`
+}
+
+func (in *EmbeddedPointerStructCRD) DeepCopyObject() runtime.Object {
+	s, err := json.Marshal(in)
+	if err != nil {
+		panic(err)
+	}
+	var out EmbeddedPointerStructCRD
+	if err := json.Unmarshal(s, &out); err != nil {
+		panic(err)
+	}
+
+	return &out
+}
diff --git a/pkg/client/fake/versioned_tracker.go b/pkg/client/fake/versioned_tracker.go
index bbe3ac9b0d..f22425278d 100644
--- a/pkg/client/fake/versioned_tracker.go
+++ b/pkg/client/fake/versioned_tracker.go
@@ -231,15 +231,17 @@ func (t versionedTracker) updateObject(
 	}
 
 	if t.withStatusSubresource.Has(gvk) {
-		if isStatus { // copy everything but status and metadata.ResourceVersion from original object
+		if isStatus { // copy everything but status, managedFields and metadata.ResourceVersion from original object
 			if err := copyStatusFrom(obj, oldObject); err != nil {
 				return nil, false, fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err)
 			}
 			passedRV := accessor.GetResourceVersion()
+			passedManagedFields := accessor.GetManagedFields()
 			if err := copyFrom(oldObject, obj); err != nil {
 				return nil, false, fmt.Errorf("failed to restore non-status fields: %w", err)
 			}
 			accessor.SetResourceVersion(passedRV)
+			accessor.SetManagedFields(passedManagedFields)
 		} else { // copy status from original object
 			if err := copyStatusFrom(oldObject, obj); err != nil {
 				return nil, false, fmt.Errorf("failed to copy the status for object with status subresource: %w", err)
diff --git a/pkg/controller/controllertest/util.go b/pkg/controller/controllertest/util.go
index 8df24fcf57..f6e8b4904d 100644
--- a/pkg/controller/controllertest/util.go
+++ b/pkg/controller/controllertest/util.go
@@ -18,6 +18,7 @@ package controllertest
 
 import (
 	"context"
+	"sync"
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -29,7 +30,8 @@ var _ cache.SharedIndexInformer = &FakeInformer{}
 // FakeInformer provides fake Informer functionality for testing.
 type FakeInformer struct {
 	// Synced is returned by the HasSynced functions to implement the Informer interface
-	Synced bool
+	Synced     bool
+	SyncedLock sync.Mutex
 
 	// RunCount is incremented each time RunInformersAndControllers is called
 	RunCount int
@@ -44,6 +46,9 @@ type fakeHandlerRegistration struct {
 
 // HasSynced implements cache.ResourceEventHandlerRegistration.
 func (f *fakeHandlerRegistration) HasSynced() bool {
+	f.informer.SyncedLock.Lock()
+	defer f.informer.SyncedLock.Unlock()
+
 	return f.informer.Synced
 }
 
@@ -54,7 +59,7 @@ func (f *FakeInformer) AddIndexers(indexers cache.Indexers) error {
 
 // GetIndexer does nothing. TODO(community): Implement this.
 func (f *FakeInformer) GetIndexer() cache.Indexer {
-	return nil
+	return cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil)
 }
 
 // Informer returns the fake Informer.
@@ -64,6 +69,9 @@ func (f *FakeInformer) Informer() cache.SharedIndexInformer {
 
 // HasSynced implements the Informer interface. Returns f.Synced.
 func (f *FakeInformer) HasSynced() bool {
+	f.SyncedLock.Lock()
+	defer f.SyncedLock.Unlock()
+
 	return f.Synced
 }