Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions apis/druid/v1alpha1/druid_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,17 @@ type DruidSpec struct {
// +optional
Affinity *v1.Affinity `json:"affinity,omitempty"`

// OrderOfUpgrade defines the order in which node types are upgraded during a rolling deploy.
// If not specified, the default order is used: historical, overlord, middleManager, indexer, broker, coordinator, router.
// +optional
OrderOfUpgrade []string `json:"orderOfUpgrade,omitempty"`
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Looks like the generated CRD manifests may still need to be included for these new API fields.

I see zz_generated.deepcopy.go was updated in the PR, but running make manifests generate locally still produced additional generated drift, especially in config/crd/bases/druid.apache.org_druids.yaml and chart/crds/druid.apache.org_druids.yaml.

Without those CRD updates, users installing from the manifests or Helm chart may not be able to persist orderOfUpgrade, orderOfUpgradeOfTiers, or tier as expected.


// OrderOfUpgradeOfTiers defines the order of tier upgrades within each node type.
// The key is the node type (e.g. "historical") and the value is an ordered list of tier names.
// Nodes matching a tier earlier in the list are upgraded first.
// +optional
OrderOfUpgradeOfTiers map[string][]string `json:"orderOfUpgradeOfTiers,omitempty"`

// Nodes a list of `Druid` Node types and their configurations.
// `DruidSpec` is used to create Kubernetes workload specs. Many of the fields above can be overridden at the specific
// `NodeSpec` level.
Expand Down Expand Up @@ -329,6 +340,12 @@ type DruidNodeSpec struct {
// +kubebuilder:validation:Enum:=historical;overlord;middleManager;indexer;broker;coordinator;router
NodeType string `json:"nodeType"`

// Tier defines the tier this node belongs to within its node type (e.g. "hot", "cold", "glacier").
// Used together with OrderOfUpgradeOfTiers in DruidSpec to control the order of upgrades
// for nodes of the same type.
// +optional
Tier string `json:"tier,omitempty"`

// DruidPort Used by the `Druid` process.
// +required
DruidPort int32 `json:"druid.port"`
Expand Down
18 changes: 18 additions & 0 deletions apis/druid/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

79 changes: 64 additions & 15 deletions controllers/druid/ordering.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,36 +18,85 @@ under the License.
*/
package druid

import "github.com/apache/druid-operator/apis/druid/v1alpha1"
import (
"sort"

"github.com/apache/druid-operator/apis/druid/v1alpha1"
)

var (
	// defaultDruidServicesOrder is the fallback node-type rollout order used
	// when spec.orderOfUpgrade is not set. It follows the rolling-update
	// order recommended by the Druid documentation.
	defaultDruidServicesOrder = []string{historical, overlord, middleManager, indexer, broker, coordinator, router}
)

// ServiceGroup associates one entry of spec.nodes with the metadata used to
// order rolling upgrades: the node type and, optionally, the tier it belongs to.
type ServiceGroup struct {
	key      string                 // map key of the node group in spec.nodes
	nodeType string                 // Druid node type (e.g. historical, broker)
	tier     string                 // optional tier within the node type (e.g. "hot"); empty when unset
	spec     v1alpha1.DruidNodeSpec // full node spec, copied from the Druid object
}

// getNodeSpecsByOrder returns all NodeSpecs f a given Druid object.
// Recommended order is described at http://druid.io/docs/latest/operations/rolling-updates.html
func getNodeSpecsByOrder(m *v1alpha1.Druid) []*ServiceGroup {
nodeTypeOrder := defaultDruidServicesOrder
if len(m.Spec.OrderOfUpgrade) > 0 {
nodeTypeOrder = m.Spec.OrderOfUpgrade
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could we make this path a little more defensive?

If orderOfUpgrade is set, it looks like the reconciler only returns node groups whose nodeType appears in that list. For a partial list, typo, or unknown node type, configured nodes could be skipped entirely.

Since deployDruidCluster uses this returned list to populate the resource name maps used by cleanup, skipped node groups may later look unused and be deleted.

Maybe we could either validate orderOfUpgrade as a complete/valid list for the configured node types, or treat it as a priority list and append any remaining configured node types in the default deterministic order.

}

scaledServiceSpecsByNodeType := map[string][]*ServiceGroup{}
for _, t := range druidServicesOrder {
scaledServiceSpecsByNodeType[t] = []*ServiceGroup{}
groupsByNodeType := map[string][]*ServiceGroup{}
for _, t := range nodeTypeOrder {
groupsByNodeType[t] = []*ServiceGroup{}
}

for key, nodeSpec := range m.Spec.Nodes {
scaledServiceSpec := scaledServiceSpecsByNodeType[nodeSpec.NodeType]
scaledServiceSpecsByNodeType[nodeSpec.NodeType] = append(scaledServiceSpec, &ServiceGroup{key: key, spec: nodeSpec})
sg := &ServiceGroup{
key: key,
nodeType: nodeSpec.NodeType,
tier: nodeSpec.Tier,
spec: nodeSpec,
}
groupsByNodeType[nodeSpec.NodeType] = append(groupsByNodeType[nodeSpec.NodeType], sg)
}

allScaledServiceSpecs := make([]*ServiceGroup, 0, len(m.Spec.Nodes))
for nodeType, groups := range groupsByNodeType {
tierOrder := m.Spec.OrderOfUpgradeOfTiers[nodeType]
sortServiceGroups(groups, tierOrder)
groupsByNodeType[nodeType] = groups
}

for _, t := range druidServicesOrder {
allScaledServiceSpecs = append(allScaledServiceSpecs, scaledServiceSpecsByNodeType[t]...)
result := make([]*ServiceGroup, 0, len(m.Spec.Nodes))
for _, t := range nodeTypeOrder {
result = append(result, groupsByNodeType[t]...)
}

return allScaledServiceSpecs
return result
}

func sortServiceGroups(groups []*ServiceGroup, tierOrder []string) {
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sortServiceGroups sorts nodes of the same node type to determine their rollout order. tierRank map will convert the tier order slice ["hot", "cold", "glacier"] into a lookup: {"hot": 0, "cold": 1, "glacier": 2}.

if len(groups) <= 1 {
return
}

tierRank := make(map[string]int, len(tierOrder))
for i, t := range tierOrder {
tierRank[t] = i
}

sort.SliceStable(groups, func(i, j int) bool {
gi, gj := groups[i], groups[j]

if len(tierOrder) > 0 {
ri, okI := tierRank[gi.tier]
rj, okJ := tierRank[gj.tier]

switch {
case okI && okJ && ri != rj:
return ri < rj
case okI && !okJ:
return true
case !okI && okJ:
return false
}
}

return gi.key < gj.key
})
}
48 changes: 40 additions & 8 deletions controllers/druid/ordering_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,21 +29,16 @@ import (

// +kubebuilder:docs-gen:collapse=Imports

/*
ordering_test
*/
var _ = Describe("Test ordering logic", func() {
const (
filePath = "testdata/ordering.yaml"
timeout = time.Second * 45
interval = time.Millisecond * 250
)

var (
druid = &druidv1alpha1.Druid{}
)
Context("When creating a druid cluster with multiple nodes (default order)", func() {
const filePath = "testdata/ordering.yaml"
var druid = &druidv1alpha1.Druid{}

Context("When creating a druid cluster with multiple nodes", func() {
It("Should create the druid object", func() {
By("Creating a new druid")
druidCR, err := readDruidClusterSpecFromFile(filePath)
Expand All @@ -68,4 +63,41 @@ var _ = Describe("Test ordering logic", func() {
Expect(orderedServiceGroups[7].key).Should(Equal("routers"))
})
})

Context("When creating a druid cluster with custom order and tiers", func() {
const filePath = "testdata/ordering-tiers.yaml"
var druid = &druidv1alpha1.Druid{}

It("Should create the druid object", func() {
By("Creating a new druid")
druidCR, err := readDruidClusterSpecFromFile(filePath)
Expect(err).Should(BeNil())
Expect(k8sClient.Create(ctx, druidCR)).To(Succeed())

By("Getting a newly created druid")
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: druidCR.Name, Namespace: druidCR.Namespace}, druid)
return err == nil
}, timeout, interval).Should(BeTrue())
})
It("Should return nodes ordered by custom nodeType order and tier order", func() {
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could we add a small regression test around incomplete or invalid orderOfUpgrade inputs?

The happy path for tier ordering is covered, but the more risky behavior seems to be partial, unknown, or duplicate node type entries. A test showing that remaining configured nodes are still reconciled, or that the spec is rejected during validation, would make this safer to evolve.

orderedServiceGroups := getNodeSpecsByOrder(druid)
Expect(orderedServiceGroups).Should(HaveLen(6))
// historical:hot first, then historical:cold, then historical:glacier
Expect(orderedServiceGroups[0].key).Should(Equal("historical-az1"))
Expect(orderedServiceGroups[0].tier).Should(Equal("hot"))
Expect(orderedServiceGroups[1].key).Should(Equal("historical-az2"))
Expect(orderedServiceGroups[1].tier).Should(Equal("cold"))
Expect(orderedServiceGroups[2].key).Should(Equal("historical-az3"))
Expect(orderedServiceGroups[2].tier).Should(Equal("glacier"))
// broker:hot then broker:cold
Expect(orderedServiceGroups[3].key).Should(Equal("broker-az1"))
Expect(orderedServiceGroups[3].tier).Should(Equal("hot"))
Expect(orderedServiceGroups[4].key).Should(Equal("broker-az2"))
Expect(orderedServiceGroups[4].tier).Should(Equal("cold"))
// coordinator (no tier)
Expect(orderedServiceGroups[5].key).Should(Equal("coordinators"))
Expect(orderedServiceGroups[5].tier).Should(Equal(""))
})
})
})
111 changes: 111 additions & 0 deletions controllers/druid/testdata/ordering-tiers.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Test fixture for custom rollout ordering: a cluster with a custom
# orderOfUpgrade (historical -> broker -> coordinator) and per-node-type
# tier ordering via orderOfUpgradeOfTiers.
apiVersion: druid.apache.org/v1alpha1
kind: Druid
metadata:
  name: ordering-tiers
  namespace: default
spec:
  image: apache/druid:25.0.0
  startScript: /druid.sh
  rollingDeploy: false
  securityContext:
    fsGroup: 1000
    runAsUser: 1000
    runAsGroup: 1000
  services:
    - spec:
        type: ClusterIP
  commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common"
  jvm.options: |-
    -server
    -XX:MaxDirectMemorySize=10240g
    -Duser.timezone=UTC
    -Dfile.encoding=UTF-8
    -Djava.io.tmpdir=/druid/data
  common.runtime.properties: |-
    druid.metadata.storage.type=derby
    druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/druid/data/derbydb/metadata.db;create=true
    druid.metadata.storage.connector.host=localhost
    druid.metadata.storage.connector.port=1527
    druid.metadata.storage.connector.createTables=true
    druid.storage.type=local
    druid.storage.storageDirectory=/druid/deepstorage
    druid.selectors.indexing.serviceName=druid/overlord
    druid.selectors.coordinator.serviceName=druid/coordinator
  orderOfUpgrade:
    - historical
    - broker
    - coordinator
  orderOfUpgradeOfTiers:
    historical:
      - hot
      - cold
      - glacier
    broker:
      - hot
      - cold
  nodes:
    historical-az1:
      nodeType: "historical"
      tier: "hot"
      druid.port: 8080
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical"
      replicas: 1
      runtime.properties: |-
        druid.service=druid/historical
    historical-az2:
      nodeType: "historical"
      tier: "cold"
      druid.port: 8080
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical"
      replicas: 1
      runtime.properties: |-
        druid.service=druid/historical
    historical-az3:
      nodeType: "historical"
      tier: "glacier"
      druid.port: 8080
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical"
      replicas: 1
      runtime.properties: |-
        druid.service=druid/historical
    broker-az1:
      nodeType: "broker"
      tier: "hot"
      druid.port: 8088
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker"
      replicas: 1
      runtime.properties: |-
        druid.service=druid/broker
    broker-az2:
      nodeType: "broker"
      tier: "cold"
      druid.port: 8088
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker"
      replicas: 1
      runtime.properties: |-
        druid.service=druid/broker
    coordinators:
      nodeType: "coordinator"
      druid.port: 8080
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord"
      replicas: 1
      runtime.properties: |-
        druid.service=druid/coordinator
Loading