8000 [deckhouse-controller] New HookInput Snapshot logic [second-iteration] by RottenRat · Pull Request #13941 · deckhouse/deckhouse · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

[deckhouse-controller] New HookInput Snapshot logic [second-iteration] #13941

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 24 commits into
base: main
Choose a base branch
from
13 changes: 9 additions & 4 deletions ee/modules/030-cloud-provider-vcd/hooks/legacy_mode.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,11 +132,16 @@ func handleLegacyMode(input *go_hook.HookInput) error {
if len(vcdAPIVers) == 0 {
input.Logger.Warn("VCD API version not defined")

legacyMode := input.Snapshots["legacy_mode"][0].(bool)
if legacyMode {
// legacyMode is set in the provider cluster configuration secret
input.Values.Set("cloudProviderVcd.internal.legacyMode", legacyMode)
snaps, err := sdkobjectpatch.UnmarshalToStruct[bool](input.NewSnapshots, "legacy_mode")
if err != nil {
return fmt.Errorf("failed to unmarshal 'legacy_mode' snapshot: %w", err)
}
if len(snaps) == 0 {
return fmt.Errorf("'legacy_mode' snapshot is empty")
}
legacyMode := snaps[0]
// legacyMode is set in the provider cluster configuration secret
input.Values.Set("cloudProviderVcd.internal.legacyMode", legacyMode)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Set this value only when legacy mode is true.

Suggested change
input.Values.Set("cloudProviderVcd.internal.legacyMode", legacyMode)
input.Values.Set("cloudProviderVcd.internal.legacyMode", legacyMode)


return nil
}
Expand Down
19 changes: 11 additions & 8 deletions global-hooks/discovery/virtualization_level.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

sdkpkg "github.com/deckhouse/module-sdk/pkg"
sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch"
)

type masterNodeInfo struct {
Expand Down Expand Up @@ -69,21 +72,21 @@ func applyMasterNodesFilter(obj *unstructured.Unstructured) (go_hook.FilterResul
}

func setGlobalVirtualizationLevel(input *go_hook.HookInput) error {
virtualizationLevel := getVirtualizationLevelFromMasterNodesLabels(input.Snapshots["master_nodes"])
virtualizationLevel := getVirtualizationLevelFromMasterNodesLabels(input.NewSnapshots.Get("master_nodes"))
input.Values.Set("global.discovery.dvpNestingLevel", virtualizationLevel)
input.Logger.Info("set DVP nesting level", slog.Int("level", virtualizationLevel))

return nil
}

func getVirtualizationLevelFromMasterNodesLabels(masterNodeInfoSnaps []go_hook.FilterResult) int {
func getVirtualizationLevelFromMasterNodesLabels(masterNodeInfoSnaps []sdkpkg.Snapshot) int {
minimalVirtualizationLevel := math.MaxInt
for _, masterNodeInfoSnap := range masterNodeInfoSnaps {
masterNodeInfo, ok := masterNodeInfoSnap.(masterNodeInfo)
if ok {
if masterNodeInfo.VirtualizationLevel >= 0 && masterNodeInfo.VirtualizationLevel < minimalVirtualizationLevel {
minimalVirtualizationLevel = masterNodeInfo.VirtualizationLevel
}
for masterNodeInfo, err := range sdkobjectpatch.SnapshotIter[masterNodeInfo](masterNodeInfoSnaps) {
if err != nil {
continue
}
if masterNodeInfo.VirtualizationLevel >= 0 && masterNodeInfo.VirtualizationLevel < minimalVirtualizationLevel {
minimalVirtualizationLevel = masterNodeInfo.VirtualizationLevel
}
}

Expand Down
23 changes: 13 additions & 10 deletions modules/021-cni-cilium/hooks/check_kernel_versions.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

"github.com/deckhouse/module-sdk/pkg"
sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch"

"github.com/deckhouse/deckhouse/go_lib/dependency/requirements"
"github.com/deckhouse/deckhouse/go_lib/set"
"github.com/deckhouse/deckhouse/pkg/log"
Expand Down Expand Up @@ -99,7 +102,7 @@ func handleNodes(input *go_hook.HookInput) error {

input.MetricsCollector.Expire(nodeKernelCheckMetricsGroup)

nodes := input.Snapshots["nodes"]
nodes := input.NewSnapshots.Get("nodes")
if len(nodes) == 0 {
input.Logger.Error("no nodes found")
return nil
Expand Down Expand Up @@ -128,9 +131,11 @@ func handleNodes(input *go_hook.HookInput) error {
// Values is re-set to update the minimum required Linux kernel version depending on the included modules
// The minimum version will later be passed to the cilium agent's cilium initContainer
input.Values.Set("cniCilium.internal.minimalRequiredKernelVersionConstraint", constraint.kernelVersionConstraint)

for _, n := range nodes {
node := n.(nodeKernelVersion)
for node, err := range sdkobjectpatch.SnapshotIter[nodeKernelVersion](nodes) {
if err != nil {
input.Logger.Error("failed to iterate over 'nodes' snapshots", log.Err(err))
continue
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do not `continue` here — the previous implementation panicked in this case. Return the error instead.

}

kernelVerStr := strings.Split(node.KernelVersion, "-")[0]
nodeSemverVersion, err := semver.NewVersion(kernelVerStr)
Expand Down Expand Up @@ -169,14 +174,12 @@ func handleNodes(input *go_hook.HookInput) error {
}

// Identify the cluster node (as nodeKernelVersion) with the lowest version of the kernel.
func defineMinimalLinuxKernelVersionNode(nodes []go_hook.FilterResult) (*nodeKernelVersion, error) {
func defineMinimalLinuxKernelVersionNode(nodes []pkg.Snapshot) (*nodeKernelVersion, error) {
var minimalNode *nodeKernelVersion
var minimalVersion *semver.Version

for _, n := range nodes {
node, ok := n.(nodeKernelVersion)
if !ok {
return nil, fmt.Errorf("unexpected type: %T", n)
for node, err := range sdkobjectpatch.SnapshotIter[nodeKernelVersion](nodes) {
if err != nil {
return nil, fmt.Errorf("failed to iterate over storage classes: %v", err)
}

kernelVerStr := strings.Split(node.KernelVersion, "-")[0]
Expand Down
11 changes: 9 additions & 2 deletions modules/021-cni-cilium/hooks/discover_vxlan_port.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch"

"github.com/deckhouse/deckhouse/go_lib/set"
)

Expand Down Expand Up @@ -177,9 +179,14 @@ func discoverVXLANPort(input *go_hook.HookInput) error {
input.Logger.Warn("Virtualization nesting level is not set globally - assuming level 0")
}

if len(input.Snapshots["cilium-configmap"]) > 0 {
ports, err := sdkobjectpatch.UnmarshalToStruct[int](input.NewSnapshots, "cilium-configmap")
if err != nil {
return fmt.Errorf("failed to unmarshal 'cilium-configmap' snapshot: %w", err)
}

if len(ports) > 0 {
instStatus = existingInstallation
if port, ok := input.Snapshots["cilium-configmap"][0].(int); ok && port > 0 {
if port := ports[0]; port > 0 {
sourcePort = port
}
}
Expand Down
36 changes: 23 additions & 13 deletions modules/030-cloud-provider-dvp/hooks/discover.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch"

"github.com/deckhouse/deckhouse/dhctl/pkg/config"
cloudDataV1 "github.com/deckhouse/deckhouse/go_lib/cloud-data/apis/v1"
)
Expand Down Expand Up @@ -89,25 +91,25 @@ func applyStorageClassFilter(obj *unstructured.Unstructured) (go_hook.FilterResu
}

func handleCloudProviderDiscoveryDataSecret(input *go_hook.HookInput) error {
if len(input.Snapshots["cloud_provider_discovery_data"]) == 0 {
if len(input.NewSnapshots.Get("cloud_provider_discovery_data")) == 0 {
input.Logger.Warn("failed to find secret 'd8-cloud-provider-discovery-data' in namespace 'kube-system'")

if len(input.Snapshots["storage_classes"]) == 0 {
if len(input.NewSnapshots.Get("storage_classes")) == 0 {
input.Logger.Warn("failed to find storage classes for dvp provisioner")

return nil
}

storageClassesSnapshots := input.Snapshots["storage_classes"]
storageClassesSnapshots := input.NewSnapshots.Get("storage_classes")

storageClasses := make([]storageClass, 0, len(storageClassesSnapshots))

for _, storageClassSnapshot := range storageClassesSnapshots {
sc := storageClassSnapshot.(*storage.StorageClass)

for storageClassSnapshot, err := range sdkobjectpatch.SnapshotIter[storage.StorageClass](storageClassesSnapshots) {
if err != nil {
return fmt.Errorf("failed to iterate over 'storage_classes' snapshots: %v", err)
}
storageClasses = append(storageClasses, storageClass{
Name: sc.Name,
DVPStorageClass: sc.Parameters["dvpStorageClass"],
Name: storageClassSnapshot.Name,
DVPStorageClass: storageClassSnapshot.Parameters["dvpStorageClass"],
})
}
input.Logger.Info("Found DVP storage classes using StorageClass snapshots: %v", storageClasses)
Expand All @@ -117,11 +119,18 @@ func handleCloudProviderDiscoveryDataSecret(input *go_hook.HookInput) error {
return nil
}

secret := input.Snapshots["cloud_provider_discovery_data"][0].(*v1.Secret)
secrets, err := sdkobjectpatch.UnmarshalToStruct[*v1.Secret](input.NewSnapshots, "cloud_provider_discovery_data")
if err != nil {
return fmt.Errorf("failed to unmarshal 'cloud_provider_discovery_data' snapshot: %w", err)
}
if len(secrets) == 0 {
return fmt.Errorf("'cloud_provider_discovery_data' snapshot is empty")
}
secret := secrets[0]

discoveryDataJSON := secret.Data["discovery-data.json"]

_, err := config.ValidateDiscoveryData(&discoveryDataJSON, []string{"/deckhouse/candi/cloud-providers/dvp/openapi"})
_, err = config.ValidateDiscoveryData(&discoveryDataJSON, []string{"/deckhouse/candi/cloud-providers/dvp/openapi"})
if err != nil {
return fmt.Errorf("failed to validate 'discovery-data.json' from 'd8-cloud-provider-discovery-data' secret: %v", err)
}
Expand Down Expand Up @@ -165,9 +174,10 @@ func handleDiscoveryDataVolumeTypes(
}
}

storageClassSnapshots := make(map[string]*storage.StorageClass)
// TODO: review this, looks like deadcode
storageClassSnapshots := make(map[string]storage.StorageClass)
for _, snapshot := range input.Snapshots["storage_classes"] {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@RottenRat refactor this please

s := snapshot.(*storage.StorageClass)
s := snapshot.(storage.StorageClass)
storageClassSnapshots[s.Name] = s
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"

sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch"
)

type SecretEncryptionKey []byte
Expand Down Expand Up @@ -81,14 +83,13 @@ var _ = sdk.RegisterFunc(&go_hook.HookConfig{
}, ensureEncryptionSecretKey)

func ensureEncryptionSecretKey(input *go_hook.HookInput) error {
keys, ok := input.Snapshots["secret_encryption_key"]

keys, err := sdkobjectpatch.UnmarshalToStruct[[]byte](input.NewSnapshots, "secret_encryption_key")
if err != nil {
return fmt.Errorf("failed to unmarshal 'secret_encryption_key' snapshot: %w", err)
}
var secretKey []byte
if ok && len(keys) > 0 {
secretKey, ok = keys[0].([]byte)
if !ok {
return fmt.Errorf("cannot convert Kubernetes Secret to SecretEncryptionKey")
}
if len(keys) > 0 {
secretKey = keys[0]
}

if len(secretKey) == 0 {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,11 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

"github.com/deckhouse/module-sdk/pkg"
sdkobjectpatch "github.com/deckhouse/module-sdk/pkg/object-patch"

"github.com/deckhouse/deckhouse/go_lib/filter"
"github.com/deckhouse/deckhouse/pkg/log"
)

type etcdNode struct {
Expand Down Expand Up @@ -154,8 +158,14 @@ func maintenanceEtcdFilter(unstructured *unstructured.Unstructured) (go_hook.Fil
func getCurrentEtcdQuotaBytes(input *go_hook.HookInput) (int64, string) {
var currentQuotaBytes int64
var nodeWithMaxQuota string
for _, endpointRaw := range input.Snapshots["etcd_endpoints"] {
endpoint := endpointRaw.(*etcdInstance)
etcdEndpointsSnapshots := input.NewSnapshots.Get("etcd_endpoints")
for endpoint, err := range sdkobjectpatch.SnapshotIter[etcdInstance](etcdEndpointsSnapshots) {
if err != nil {
input.Logger.Error("failed to iterate over 'etcd_endpoints' snapshot", log.Err(err))
currentQuotaBytes = defaultEtcdMaxSize
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You must return an error here instead of falling back to a default quota. Where does this fallback logic come from?

nodeWithMaxQuota = "default"
break
}
quotaForInstance := endpoint.MaxDbSize
if quotaForInstance > currentQuotaBytes {
currentQuotaBytes = quotaForInstance
Expand All @@ -171,25 +181,31 @@ func getCurrentEtcdQuotaBytes(input *go_hook.HookInput) (int64, string) {
return currentQuotaBytes, nodeWithMaxQuota
}

func getNodeWithMinimalMemory(snapshots []go_hook.FilterResult) *etcdNode {
func getNodeWithMinimalMemory(snapshots []pkg.Snapshot) (*etcdNode, error) {
if len(snapshots) == 0 {
return nil
return nil, fmt.Errorf("'master_nodes' snapshot is empty")
}
var nodeWithMinimalMemory *etcdNode
for node, err := range sdkobjectpatch.SnapshotIter[etcdNode](snapshots) {
if err != nil {
return nil, fmt.Errorf("cannot iterate over 'master_nodes' snapshot: %w", err)
}

if nodeWithMinimalMemory == nil {
nodeWithMinimalMemory = &node
}

node := snapshots[0].(*etcdNode)
for i := 1; i < len(snapshots); i++ {
n := snapshots[i].(*etcdNode)
// for not dedicated nodes we will not set new quota
if !n.IsDedicated {
return n
if !node.IsDedicated {
return &node, nil
}

if n.Memory < node.Memory {
node = n
if node.Memory < nodeWithMinimalMemory.Memory {
*nodeWithMinimalMemory = node
}
}

return node
return nodeWithMinimalMemory, nil
}

func calcNewQuotaForMemory(minimalMemoryNodeBytes int64) int64 {
Expand Down Expand Up @@ -224,10 +240,11 @@ func calcEtcdQuotaBackendBytes(input *go_hook.HookInput) int64 {

input.Logger.Debug("Current etcd quota. Getting from node with max quota", slog.Int64("quota", currentQuotaBytes), slog.String("from", nodeWithMaxQuota))

snaps := input.Snapshots["master_nodes"]
node := getNodeWithMinimalMemory(snaps)
if node == nil {
input.Logger.Warn("Cannot get node with minimal memory")
masterNodeSnapshots := input.NewSnapshots.Get("master_nodes")
node, err := getNodeWithMinimalMemory(masterNodeSnapshots)

if err != nil {
input.Logger.Warn("Cannot get node with minimal memory", log.Err(err))
return currentQuotaBytes
}

Expand Down
Loading
Loading
0