-
Notifications
You must be signed in to change notification settings - Fork 127
[deckhouse-controller] New HookInput Snapshot logic [second-iteration] #13941
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
57e453b
721586c
12a1932
0cb0c90
3d28f1c
4aed1ef
60d7769
1e04b97
963379b
551d833
bf51d15
9b6653d
96cee47
cd87474
e2a1397
ee8c27b
ad00642
661bccf
0bc84ae
fab2704
01d6487
a7128c9
1afc13d
1922f5a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -39,6 +39,9 @@ import ( | |
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||
|
||
"github.com/deckhouse/module-sdk/pkg" | ||
sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch" | ||
|
||
"github.com/deckhouse/deckhouse/go_lib/dependency/requirements" | ||
"github.com/deckhouse/deckhouse/go_lib/set" | ||
"github.com/deckhouse/deckhouse/pkg/log" | ||
|
@@ -99,7 +102,7 @@ func handleNodes(input *go_hook.HookInput) error { | |
|
||
input.MetricsCollector.Expire(nodeKernelCheckMetricsGroup) | ||
|
||
nodes := input.Snapshots["nodes"] | ||
nodes := input.NewSnapshots.Get("nodes") | ||
if len(nodes) == 0 { | ||
input.Logger.Error("no nodes found") | ||
return nil | ||
|
@@ -128,9 +131,11 @@ func handleNodes(input *go_hook.HookInput) error { | |
// Values is re-set to update the minimum required Linux kernel version depending on the included modules | ||
// The minimum version will later be passed to the cilium agent's cilium initContainer | ||
input.Values.Set("cniCilium.internal.minimalRequiredKernelVersionConstraint", constraint.kernelVersionConstraint) | ||
|
||
for _, n := range nodes { | ||
node := n.(nodeKernelVersion) | ||
for node, err := range sdkobjectpatch.SnapshotIter[nodeKernelVersion](nodes) { | ||
if err != nil { | ||
input.Logger.Error("failed to iterate over 'nodes' snapshots", log.Err(err)) | ||
continue | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. do not continue here. it gives a panic in previous realization. make return |
||
} | ||
|
||
kernelVerStr := strings.Split(node.KernelVersion, "-")[0] | ||
nodeSemverVersion, err := semver.NewVersion(kernelVerStr) | ||
|
@@ -169,14 +174,12 @@ func handleNodes(input *go_hook.HookInput) error { | |
} | ||
|
||
// Identify the cluster node (as nodeKernelVersion) with the lowest version of the kernel. | ||
func defineMinimalLinuxKernelVersionNode(nodes []go_hook.FilterResult) (*nodeKernelVersion, error) { | ||
func defineMinimalLinuxKernelVersionNode(nodes []pkg.Snapshot) (*nodeKernelVersion, error) { | ||
var minimalNode *nodeKernelVersion | ||
var minimalVersion *semver.Version | ||
|
||
for _, n := range nodes { | ||
node, ok := n.(nodeKernelVersion) | ||
if !ok { | ||
return nil, fmt.Errorf("unexpected type: %T", n) | ||
for node, err := range sdkobjectpatch.SnapshotIter[nodeKernelVersion](nodes) { | ||
if err != nil { | ||
return nil, fmt.Errorf("failed to iterate over storage classes: %v", err) | ||
} | ||
|
||
kernelVerStr := strings.Split(node.KernelVersion, "-")[0] | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -32,6 +32,8 @@ import ( | |
meta "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||
|
||
sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch" | ||
|
||
"github.com/deckhouse/deckhouse/dhctl/pkg/config" | ||
cloudDataV1 "github.com/deckhouse/deckhouse/go_lib/cloud-data/apis/v1" | ||
) | ||
|
@@ -89,25 +91,25 @@ func applyStorageClassFilter(obj *unstructured.Unstructured) (go_hook.FilterResu | |
} | ||
|
||
func handleCloudProviderDiscoveryDataSecret(input *go_hook.HookInput) error { | ||
if len(input.Snapshots["cloud_provider_discovery_data"]) == 0 { | ||
if len(input.NewSnapshots.Get("cloud_provider_discovery_data")) == 0 { | ||
input.Logger.Warn("failed to find secret 'd8-cloud-provider-discovery-data' in namespace 'kube-system'") | ||
|
||
if len(input.Snapshots["storage_classes"]) == 0 { | ||
if len(input.NewSnapshots.Get("storage_classes")) == 0 { | ||
input.Logger.Warn("failed to find storage classes for dvp provisioner") | ||
|
||
return nil | ||
} | ||
|
||
storageClassesSnapshots := input.Snapshots["storage_classes"] | ||
storageClassesSnapshots := input.NewSnapshots.Get("storage_classes") | ||
|
||
storageClasses := make([]storageClass, 0, len(storageClassesSnapshots)) | ||
|
||
for _, storageClassSnapshot := range storageClassesSnapshots { | ||
sc := storageClassSnapshot.(*storage.StorageClass) | ||
|
||
for storageClassSnapshot, err := range sdkobjectpatch.SnapshotIter[storage.StorageClass](storageClassesSnapshots) { | ||
if err != nil { | ||
return fmt.Errorf("failed to iterate over 'storage_classes' snapshots: %v", err) | ||
} | ||
storageClasses = append(storageClasses, storageClass{ | ||
Name: sc.Name, | ||
DVPStorageClass: sc.Parameters["dvpStorageClass"], | ||
Name: storageClassSnapshot.Name, | ||
DVPStorageClass: storageClassSnapshot.Parameters["dvpStorageClass"], | ||
}) | ||
} | ||
input.Logger.Info("Found DVP storage classes using StorageClass snapshots: %v", storageClasses) | ||
|
@@ -117,11 +119,18 @@ func handleCloudProviderDiscoveryDataSecret(input *go_hook.HookInput) error { | |
return nil | ||
} | ||
|
||
secret := input.Snapshots["cloud_provider_discovery_data"][0].(*v1.Secret) | ||
secrets, err := sdkobjectpatch.UnmarshalToStruct[*v1.Secret](input.NewSnapshots, "cloud_provider_discovery_data") | ||
if err != nil { | ||
return fmt.Errorf("failed to unmarshal 'cloud_provider_discovery_data' snapshot: %w", err) | ||
} | ||
if len(secrets) == 0 { | ||
return fmt.Errorf("'cloud_provider_discovery_data' snapshot is empty") | ||
} | ||
secret := secrets[0] | ||
|
||
discoveryDataJSON := secret.Data["discovery-data.json"] | ||
|
||
_, err := config.ValidateDiscoveryData(&discoveryDataJSON, []string{"/deckhouse/candi/cloud-providers/dvp/openapi"}) | ||
_, err = config.ValidateDiscoveryData(&discoveryDataJSON, []string{"/deckhouse/candi/cloud-providers/dvp/openapi"}) | ||
if err != nil { | ||
return fmt.Errorf("failed to validate 'discovery-data.json' from 'd8-cloud-provider-discovery-data' secret: %v", err) | ||
} | ||
|
@@ -165,9 +174,10 @@ func handleDiscoveryDataVolumeTypes( | |
} | ||
} | ||
|
||
storageClassSnapshots := make(map[string]*storage.StorageClass) | ||
// TODO: review this, looks like deadcode | ||
storageClassSnapshots := make(map[string]storage.StorageClass) | ||
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @RottenRat, please refactor this. |
||
s := snapshot.(*storage.StorageClass) | ||
s := snapshot.(storage.StorageClass) | ||
storageClassSnapshots[s.Name] = s | ||
} | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -31,7 +31,11 @@ import ( | |
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||
|
||
"github.com/deckhouse/module-sdk/pkg" | ||
sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch" | ||
|
||
"github.com/deckhouse/deckhouse/go_lib/filter" | ||
"github.com/deckhouse/deckhouse/pkg/log" | ||
) | ||
|
||
type etcdNode struct { | ||
|
@@ -154,8 +158,14 @@ func maintenanceEtcdFilter(unstructured *unstructured.Unstructured) (go_hook.Fil | |
func getCurrentEtcdQuotaBytes(input *go_hook.HookInput) (int64, string) { | ||
var currentQuotaBytes int64 | ||
var nodeWithMaxQuota string | ||
for _, endpointRaw := range input.Snapshots["etcd_endpoints"] { | ||
endpoint := endpointRaw.(*etcdInstance) | ||
etcdEndpointsSnapshots := input.NewSnapshots.Get("etcd_endpoints") | ||
for endpoint, err := range sdkobjectpatch.SnapshotIter[etcdInstance](etcdEndpointsSnapshots) { | ||
if err != nil { | ||
input.Logger.Error("failed to iterate over 'etcd_endpoints' snapshot", log.Err(err)) | ||
currentQuotaBytes = defaultEtcdMaxSize | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. You must return the error here instead of falling back to defaults. Where did this fallback logic come from? |
||
nodeWithMaxQuota = "default" | ||
break | ||
} | ||
quotaForInstance := endpoint.MaxDbSize | ||
if quotaForInstance > currentQuotaBytes { | ||
currentQuotaBytes = quotaForInstance | ||
|
@@ -171,25 +181,31 @@ func getCurrentEtcdQuotaBytes(input *go_hook.HookInput) (int64, string) { | |
return currentQuotaBytes, nodeWithMaxQuota | ||
} | ||
|
||
func getNodeWithMinimalMemory(snapshots []go_hook.FilterResult) *etcdNode { | ||
func getNodeWithMinimalMemory(snapshots []pkg.Snapshot) (*etcdNode, error) { | ||
if len(snapshots) == 0 { | ||
return nil | ||
return nil, fmt.Errorf("'master_nodes' snapshot is empty") | ||
} | ||
var nodeWithMinimalMemory *etcdNode | ||
for node, err := range sdkobjectpatch.SnapshotIter[etcdNode](snapshots) { | ||
if err != nil { | ||
return nil, fmt.Errorf("cannot iterate over 'master_nodes' snapshot: %w", err) | ||
} | ||
|
||
if nodeWithMinimalMemory == nil { | ||
nodeWithMinimalMemory = &node | ||
} | ||
|
||
node := snapshots[0].(*etcdNode) | ||
for i := 1; i < len(snapshots); i++ { | ||
n := snapshots[i].(*etcdNode) | ||
// for not dedicated nodes we will not set new quota | ||
if !n.IsDedicated { | ||
return n | ||
if !node.IsDedicated { | ||
return &node, nil | ||
} | ||
|
||
if n.Memory < node.Memory { | ||
node = n | ||
if node.Memory < nodeWithMinimalMemory.Memory { | ||
*nodeWithMinimalMemory = node | ||
} | ||
} | ||
|
||
return node | ||
return nodeWithMinimalMemory, nil | ||
} | ||
|
||
func calcNewQuotaForMemory(minimalMemoryNodeBytes int64) int64 { | ||
|
@@ -224,10 +240,11 @@ func calcEtcdQuotaBackendBytes(input *go_hook.HookInput) int64 { | |
|
||
input.Logger.Debug("Current etcd quota. Getting from node with max quota", slog.Int64("quota", currentQuotaBytes), slog.String("from", nodeWithMaxQuota)) | ||
|
||
snaps := input.Snapshots["master_nodes"] | ||
node := getNodeWithMinimalMemory(snaps) | ||
if node == nil { | ||
input.Logger.Warn("Cannot get node with minimal memory") | ||
masterNodeSnapshots := input.NewSnapshots.Get("master_nodes") | ||
node, err := getNodeWithMinimalMemory(masterNodeSnapshots) | ||
|
||
if err != nil { | ||
input.Logger.Warn("Cannot get node with minimal memory", log.Err(err)) | ||
return currentQuotaBytes | ||
} | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
only if legacy mode is true