diff --git a/docs/resources/elasticsearch_index_lifecycle.md b/docs/resources/elasticsearch_index_lifecycle.md
index 06ff3271f..91cf5463c 100644
--- a/docs/resources/elasticsearch_index_lifecycle.md
+++ b/docs/resources/elasticsearch_index_lifecycle.md
@@ -62,13 +62,13 @@ resource "elasticstack_elasticsearch_index_lifecycle" "my_ilm" {
### Optional
-- `cold` (Block List, Max: 1) The index is no longer being updated and is queried infrequently. The information still needs to be searchable, but it’s okay if those queries are slower. (see [below for nested schema](#nestedblock--cold))
-- `delete` (Block List, Max: 1) The index is no longer needed and can safely be removed. (see [below for nested schema](#nestedblock--delete))
-- `elasticsearch_connection` (Block List, Max: 1, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection))
-- `frozen` (Block List, Max: 1) The index is no longer being updated and is queried rarely. The information still needs to be searchable, but it’s okay if those queries are extremely slow. (see [below for nested schema](#nestedblock--frozen))
-- `hot` (Block List, Max: 1) The index is actively being updated and queried. (see [below for nested schema](#nestedblock--hot))
+- `cold` (Block, Optional) The index is no longer being updated and is queried infrequently. The information still needs to be searchable, but it's okay if those queries are slower. (see [below for nested schema](#nestedblock--cold))
+- `delete` (Block, Optional) The index is no longer needed and can safely be removed. (see [below for nested schema](#nestedblock--delete))
+- `elasticsearch_connection` (Block List, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection))
+- `frozen` (Block, Optional) The index is no longer being updated and is queried rarely. The information still needs to be searchable, but it's okay if those queries are extremely slow. (see [below for nested schema](#nestedblock--frozen))
+- `hot` (Block, Optional) The index is actively being updated and queried. (see [below for nested schema](#nestedblock--hot))
- `metadata` (String) Optional user metadata about the ilm policy. Must be valid JSON document.
-- `warm` (Block List, Max: 1) The index is no longer being updated but is still being queried. (see [below for nested schema](#nestedblock--warm))
+- `warm` (Block, Optional) The index is no longer being updated but is still being queried. (see [below for nested schema](#nestedblock--warm))
### Read-Only
@@ -80,15 +80,15 @@ resource "elasticstack_elasticsearch_index_lifecycle" "my_ilm" {
Optional:
-- `allocate` (Block List, Max: 1) Updates the index settings to change which nodes are allowed to host the index shards and change the number of replicas. (see [below for nested schema](#nestedblock--cold--allocate))
-- `downsample` (Block List, Max: 1) Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity. (see [below for nested schema](#nestedblock--cold--downsample))
-- `freeze` (Block List, Max: 1) Freeze the index to minimize its memory footprint. (see [below for nested schema](#nestedblock--cold--freeze))
-- `migrate` (Block List, Max: 1) Moves the index to the data tier that corresponds to the current phase by updating the "index.routing.allocation.include._tier_preference" index setting. (see [below for nested schema](#nestedblock--cold--migrate))
+- `allocate` (Block, Optional) Updates the index settings to change which nodes are allowed to host the index shards and change the number of replicas. (see [below for nested schema](#nestedblock--cold--allocate))
+- `downsample` (Block, Optional) Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity. (see [below for nested schema](#nestedblock--cold--downsample))
+- `freeze` (Block, Optional) Freeze the index to minimize its memory footprint. (see [below for nested schema](#nestedblock--cold--freeze))
+- `migrate` (Block, Optional) Moves the index to the data tier that corresponds to the current phase by updating the "index.routing.allocation.include._tier_preference" index setting. (see [below for nested schema](#nestedblock--cold--migrate))
- `min_age` (String) ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.
-- `readonly` (Block List, Max: 1) Makes the index read-only. (see [below for nested schema](#nestedblock--cold--readonly))
-- `searchable_snapshot` (Block List, Max: 1) Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot. (see [below for nested schema](#nestedblock--cold--searchable_snapshot))
-- `set_priority` (Block List, Max: 1) Sets the priority of the index as soon as the policy enters the hot, warm, or cold phase. Higher priority indices are recovered before indices with lower priorities following a node restart. Default priority is 1. (see [below for nested schema](#nestedblock--cold--set_priority))
-- `unfollow` (Block List, Max: 1) Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action. (see [below for nested schema](#nestedblock--cold--unfollow))
+- `readonly` (Block, Optional) Makes the index read-only. (see [below for nested schema](#nestedblock--cold--readonly))
+- `searchable_snapshot` (Block, Optional) Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot. (see [below for nested schema](#nestedblock--cold--searchable_snapshot))
+- `set_priority` (Block, Optional) Sets the priority of the index as soon as the policy enters the hot, warm, or cold phase. Higher priority indices are recovered before indices with lower priorities following a node restart. Default priority is 1. (see [below for nested schema](#nestedblock--cold--set_priority))
+- `unfollow` (Block, Optional) Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action. (see [below for nested schema](#nestedblock--cold--unfollow))
### Nested Schema for `cold.allocate`
@@ -105,13 +105,10 @@ Optional:
### Nested Schema for `cold.downsample`
-Required:
-
-- `fixed_interval` (String) Downsampling interval
-
Optional:
-- `wait_timeout` (String) Downsampling interval
+- `fixed_interval` (String) Downsampling interval. Required when the `downsample` action is configured.
+- `wait_timeout` (String) Maximum time to wait for the downsample operation to complete before timing out.
@@ -141,21 +138,18 @@ Optional:
### Nested Schema for `cold.searchable_snapshot`
-Required:
-
-- `snapshot_repository` (String) Repository used to store the snapshot.
-
Optional:
- `force_merge_index` (Boolean) Force merges the managed index to one segment.
+- `snapshot_repository` (String) Repository used to store the snapshot. Required when the `searchable_snapshot` action is configured.
### Nested Schema for `cold.set_priority`
-Required:
+Optional:
-- `priority` (Number) The priority for the index. Must be 0 or greater.
+- `priority` (Number) The priority for the index. Must be 0 or greater. Required when the `set_priority` action is configured.
@@ -172,9 +166,9 @@ Optional:
Optional:
-- `delete` (Block List, Max: 1) Permanently removes the index. (see [below for nested schema](#nestedblock--delete--delete))
+- `delete` (Block, Optional) Permanently removes the index. (see [below for nested schema](#nestedblock--delete--delete))
- `min_age` (String) ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.
-- `wait_for_snapshot` (Block List, Max: 1) Waits for the specified SLM policy to be executed before removing the index. This ensures that a snapshot of the deleted index is available. (see [below for nested schema](#nestedblock--delete--wait_for_snapshot))
+- `wait_for_snapshot` (Block, Optional) Waits for the specified SLM policy to be executed before removing the index. This ensures that a snapshot of the deleted index is available. (see [below for nested schema](#nestedblock--delete--wait_for_snapshot))
### Nested Schema for `delete.delete`
@@ -187,9 +181,9 @@ Optional:
### Nested Schema for `delete.wait_for_snapshot`
-Required:
+Optional:
-- `policy` (String) Name of the SLM policy that the delete action should wait for.
+- `policy` (String) Name of the SLM policy that the delete action should wait for. Required when the `wait_for_snapshot` action is configured.
@@ -220,18 +214,15 @@ Optional:
Optional:
- `min_age` (String) ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.
-- `searchable_snapshot` (Block List, Max: 1) Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot. (see [below for nested schema](#nestedblock--frozen--searchable_snapshot))
+- `searchable_snapshot` (Block, Optional) Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot. (see [below for nested schema](#nestedblock--frozen--searchable_snapshot))
### Nested Schema for `frozen.searchable_snapshot`
-Required:
-
-- `snapshot_repository` (String) Repository used to store the snapshot.
-
Optional:
- `force_merge_index` (Boolean) Force merges the managed index to one segment.
+- `snapshot_repository` (String) Repository used to store the snapshot. Required when the `searchable_snapshot` action is configured.
@@ -240,38 +231,32 @@ Optional:
Optional:
-- `downsample` (Block List, Max: 1) Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity. (see [below for nested schema](#nestedblock--hot--downsample))
-- `forcemerge` (Block List, Max: 1) Force merges the index into the specified maximum number of segments. This action makes the index read-only. (see [below for nested schema](#nestedblock--hot--forcemerge))
+- `downsample` (Block, Optional) Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity. (see [below for nested schema](#nestedblock--hot--downsample))
+- `forcemerge` (Block, Optional) Force merges the index into the specified maximum number of segments. This action makes the index read-only. (see [below for nested schema](#nestedblock--hot--forcemerge))
- `min_age` (String) ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.
-- `readonly` (Block List, Max: 1) Makes the index read-only. (see [below for nested schema](#nestedblock--hot--readonly))
-- `rollover` (Block List, Max: 1) Rolls over a target to a new index when the existing index meets one or more of the rollover conditions. (see [below for nested schema](#nestedblock--hot--rollover))
-- `searchable_snapshot` (Block List, Max: 1) Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot. (see [below for nested schema](#nestedblock--hot--searchable_snapshot))
-- `set_priority` (Block List, Max: 1) Sets the priority of the index as soon as the policy enters the hot, warm, or cold phase. Higher priority indices are recovered before indices with lower priorities following a node restart. Default priority is 1. (see [below for nested schema](#nestedblock--hot--set_priority))
-- `shrink` (Block List, Max: 1) Sets a source index to read-only and shrinks it into a new index with fewer primary shards. (see [below for nested schema](#nestedblock--hot--shrink))
-- `unfollow` (Block List, Max: 1) Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action. (see [below for nested schema](#nestedblock--hot--unfollow))
+- `readonly` (Block, Optional) Makes the index read-only. (see [below for nested schema](#nestedblock--hot--readonly))
+- `rollover` (Block, Optional) Rolls over a target to a new index when the existing index meets one or more of the rollover conditions. (see [below for nested schema](#nestedblock--hot--rollover))
+- `searchable_snapshot` (Block, Optional) Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot. (see [below for nested schema](#nestedblock--hot--searchable_snapshot))
+- `set_priority` (Block, Optional) Sets the priority of the index as soon as the policy enters the hot, warm, or cold phase. Higher priority indices are recovered before indices with lower priorities following a node restart. Default priority is 1. (see [below for nested schema](#nestedblock--hot--set_priority))
+- `shrink` (Block, Optional) Sets a source index to read-only and shrinks it into a new index with fewer primary shards. (see [below for nested schema](#nestedblock--hot--shrink))
+- `unfollow` (Block, Optional) Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action. (see [below for nested schema](#nestedblock--hot--unfollow))
### Nested Schema for `hot.downsample`
-Required:
-
-- `fixed_interval` (String) Downsampling interval
-
Optional:
-- `wait_timeout` (String) Downsampling interval
+- `fixed_interval` (String) Downsampling interval. Required when the `downsample` action is configured.
+- `wait_timeout` (String) Maximum time to wait for the downsample operation to complete before timing out.
### Nested Schema for `hot.forcemerge`
-Required:
-
-- `max_num_segments` (Number) Number of segments to merge to. To fully merge the index, set to 1.
-
Optional:
- `index_codec` (String) Codec used to compress the document store.
+- `max_num_segments` (Number) Number of segments to merge to. To fully merge the index, set to 1. Required when the `forcemerge` action is configured.
@@ -302,21 +287,18 @@ Optional:
### Nested Schema for `hot.searchable_snapshot`
-Required:
-
-- `snapshot_repository` (String) Repository used to store the snapshot.
-
Optional:
- `force_merge_index` (Boolean) Force merges the managed index to one segment.
+- `snapshot_repository` (String) Repository used to store the snapshot. Required when the `searchable_snapshot` action is configured.
### Nested Schema for `hot.set_priority`
-Required:
+Optional:
-- `priority` (Number) The priority for the index. Must be 0 or greater.
+- `priority` (Number) The priority for the index. Must be 0 or greater. Required when the `set_priority` action is configured.
@@ -343,15 +325,15 @@ Optional:
Optional:
-- `allocate` (Block List, Max: 1) Updates the index settings to change which nodes are allowed to host the index shards and change the number of replicas. (see [below for nested schema](#nestedblock--warm--allocate))
-- `downsample` (Block List, Max: 1) Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity. (see [below for nested schema](#nestedblock--warm--downsample))
-- `forcemerge` (Block List, Max: 1) Force merges the index into the specified maximum number of segments. This action makes the index read-only. (see [below for nested schema](#nestedblock--warm--forcemerge))
-- `migrate` (Block List, Max: 1) Moves the index to the data tier that corresponds to the current phase by updating the "index.routing.allocation.include._tier_preference" index setting. (see [below for nested schema](#nestedblock--warm--migrate))
+- `allocate` (Block, Optional) Updates the index settings to change which nodes are allowed to host the index shards and change the number of replicas. (see [below for nested schema](#nestedblock--warm--allocate))
+- `downsample` (Block, Optional) Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity. (see [below for nested schema](#nestedblock--warm--downsample))
+- `forcemerge` (Block, Optional) Force merges the index into the specified maximum number of segments. This action makes the index read-only. (see [below for nested schema](#nestedblock--warm--forcemerge))
+- `migrate` (Block, Optional) Moves the index to the data tier that corresponds to the current phase by updating the "index.routing.allocation.include._tier_preference" index setting. (see [below for nested schema](#nestedblock--warm--migrate))
- `min_age` (String) ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.
-- `readonly` (Block List, Max: 1) Makes the index read-only. (see [below for nested schema](#nestedblock--warm--readonly))
-- `set_priority` (Block List, Max: 1) Sets the priority of the index as soon as the policy enters the hot, warm, or cold phase. Higher priority indices are recovered before indices with lower priorities following a node restart. Default priority is 1. (see [below for nested schema](#nestedblock--warm--set_priority))
-- `shrink` (Block List, Max: 1) Sets a source index to read-only and shrinks it into a new index with fewer primary shards. (see [below for nested schema](#nestedblock--warm--shrink))
-- `unfollow` (Block List, Max: 1) Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action. (see [below for nested schema](#nestedblock--warm--unfollow))
+- `readonly` (Block, Optional) Makes the index read-only. (see [below for nested schema](#nestedblock--warm--readonly))
+- `set_priority` (Block, Optional) Sets the priority of the index as soon as the policy enters the hot, warm, or cold phase. Higher priority indices are recovered before indices with lower priorities following a node restart. Default priority is 1. (see [below for nested schema](#nestedblock--warm--set_priority))
+- `shrink` (Block, Optional) Sets a source index to read-only and shrinks it into a new index with fewer primary shards. (see [below for nested schema](#nestedblock--warm--shrink))
+- `unfollow` (Block, Optional) Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action. (see [below for nested schema](#nestedblock--warm--unfollow))
### Nested Schema for `warm.allocate`
@@ -368,25 +350,19 @@ Optional:
### Nested Schema for `warm.downsample`
-Required:
-
-- `fixed_interval` (String) Downsampling interval
-
Optional:
-- `wait_timeout` (String) Downsampling interval
+- `fixed_interval` (String) Downsampling interval. Required when the `downsample` action is configured.
+- `wait_timeout` (String) Maximum time to wait for the downsample operation to complete before timing out.
### Nested Schema for `warm.forcemerge`
-Required:
-
-- `max_num_segments` (Number) Number of segments to merge to. To fully merge the index, set to 1.
-
Optional:
- `index_codec` (String) Codec used to compress the document store.
+- `max_num_segments` (Number) Number of segments to merge to. To fully merge the index, set to 1. Required when the `forcemerge` action is configured.
@@ -408,9 +384,9 @@ Optional:
### Nested Schema for `warm.set_priority`
-Required:
+Optional:
-- `priority` (Number) The priority for the index. Must be 0 or greater.
+- `priority` (Number) The priority for the index. Must be 0 or greater. Required when the `set_priority` action is configured.
diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go
index 413a01fe7..1dbcdff11 100644
--- a/internal/clients/elasticsearch/index.go
+++ b/internal/clients/elasticsearch/index.go
@@ -33,80 +33,69 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)
-func PutIlm(ctx context.Context, apiClient *clients.APIClient, policy *models.Policy) diag.Diagnostics {
- var diags diag.Diagnostics
+func PutIlm(ctx context.Context, apiClient *clients.APIClient, policy *models.Policy) fwdiags.Diagnostics {
policyBytes, err := json.Marshal(map[string]any{"policy": policy})
if err != nil {
- return diag.FromErr(err)
+ return fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
esClient, err := apiClient.GetESClient()
if err != nil {
- return diag.FromErr(err)
+ return fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
req := esClient.ILM.PutLifecycle.WithBody(bytes.NewReader(policyBytes))
res, err := esClient.ILM.PutLifecycle(policy.Name, req, esClient.ILM.PutLifecycle.WithContext(ctx))
if err != nil {
- return diag.FromErr(err)
+ return fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
defer res.Body.Close()
- if diags := diagutil.CheckError(res, "Unable to create or update the ILM policy"); diags.HasError() {
- return diags
- }
- return diags
+ return diagutil.CheckErrorFromFW(res, "Unable to create or update the ILM policy")
}
-func GetIlm(ctx context.Context, apiClient *clients.APIClient, policyName string) (*models.PolicyDefinition, diag.Diagnostics) {
- var diags diag.Diagnostics
+func GetIlm(ctx context.Context, apiClient *clients.APIClient, policyName string) (*models.PolicyDefinition, fwdiags.Diagnostics) {
esClient, err := apiClient.GetESClient()
if err != nil {
- return nil, diag.FromErr(err)
+ return nil, fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
req := esClient.ILM.GetLifecycle.WithPolicy(policyName)
res, err := esClient.ILM.GetLifecycle(req, esClient.ILM.GetLifecycle.WithContext(ctx))
if err != nil {
- return nil, diag.FromErr(err)
+ return nil, fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return nil, nil
}
- if diags := diagutil.CheckError(res, "Unable to fetch ILM policy from the cluster."); diags.HasError() {
+ if diags := diagutil.CheckErrorFromFW(res, "Unable to fetch ILM policy from the cluster."); diags.HasError() {
return nil, diags
}
- // our API response
ilm := make(map[string]models.PolicyDefinition)
if err := json.NewDecoder(res.Body).Decode(&ilm); err != nil {
- return nil, diag.FromErr(err)
+ return nil, fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
- if ilm, ok := ilm[policyName]; ok {
- return &ilm, diags
+ if def, ok := ilm[policyName]; ok {
+ return &def, nil
+ }
+ return nil, fwdiags.Diagnostics{
+ fwdiags.NewErrorDiagnostic(
+ "Unable to find a ILM policy in the cluster",
+ fmt.Sprintf(`Unable to find "%s" ILM policy in the cluster`, policyName),
+ ),
}
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to find a ILM policy in the cluster",
- Detail: fmt.Sprintf(`Unable to find "%s" ILM policy in the cluster`, policyName),
- })
- return nil, diags
}
-func DeleteIlm(ctx context.Context, apiClient *clients.APIClient, policyName string) diag.Diagnostics {
- var diags diag.Diagnostics
-
+func DeleteIlm(ctx context.Context, apiClient *clients.APIClient, policyName string) fwdiags.Diagnostics {
esClient, err := apiClient.GetESClient()
if err != nil {
- return diag.FromErr(err)
+ return fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
res, err := esClient.ILM.DeleteLifecycle(policyName, esClient.ILM.DeleteLifecycle.WithContext(ctx))
if err != nil {
- return diag.FromErr(err)
+ return fwdiags.Diagnostics{fwdiags.NewErrorDiagnostic(err.Error(), err.Error())}
}
defer res.Body.Close()
- if diags := diagutil.CheckError(res, "Unable to delete ILM policy."); diags.HasError() {
- return diags
- }
- return diags
+ return diagutil.CheckErrorFromFW(res, "Unable to delete ILM policy.")
}
func PutComponentTemplate(ctx context.Context, apiClient *clients.APIClient, template *models.ComponentTemplate) diag.Diagnostics {
diff --git a/internal/elasticsearch/index/descriptions.go b/internal/elasticsearch/index/descriptions.go
index e0693ee9e..90f58ccb4 100644
--- a/internal/elasticsearch/index/descriptions.go
+++ b/internal/elasticsearch/index/descriptions.go
@@ -33,9 +33,3 @@ var componentTemplateSettingsDescription string
//go:embed descriptions/index_template_resource.md
var indexTemplateResourceDescription string
-
-//go:embed descriptions/ilm_resource.md
-var ilmResourceDescription string
-
-//go:embed descriptions/ilm_set_priority_action.md
-var ilmSetPriorityActionDescription string
diff --git a/internal/elasticsearch/index/ilm.go b/internal/elasticsearch/index/ilm.go
deleted file mode 100644
index df9f9751b..000000000
--- a/internal/elasticsearch/index/ilm.go
+++ /dev/null
@@ -1,792 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package index
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/elastic/terraform-provider-elasticstack/internal/clients"
- "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch"
- "github.com/elastic/terraform-provider-elasticstack/internal/models"
- "github.com/elastic/terraform-provider-elasticstack/internal/tfsdkutils"
- "github.com/elastic/terraform-provider-elasticstack/internal/utils"
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/terraform-plugin-log/tflog"
- "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
- "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
- "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
-)
-
-var supportedIlmPhases = [...]string{"hot", "warm", "cold", "frozen", "delete"}
-
-func ResourceIlm() *schema.Resource {
- ilmSchema := map[string]*schema.Schema{
- "id": {
- Description: "Internal identifier of the resource",
- Type: schema.TypeString,
- Computed: true,
- },
- "name": {
- Description: "Identifier for the policy.",
- Type: schema.TypeString,
- Required: true,
- ForceNew: true,
- },
- "metadata": {
- Description: "Optional user metadata about the ilm policy. Must be valid JSON document.",
- Type: schema.TypeString,
- Optional: true,
- ValidateFunc: validation.StringIsJSON,
- DiffSuppressFunc: tfsdkutils.DiffJSONSuppress,
- },
- "hot": {
- Description: "The index is actively being updated and queried.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- AtLeastOneOf: []string{"hot", "warm", "cold", "frozen", "delete"},
- Elem: &schema.Resource{
- Schema: getSchema("set_priority", "unfollow", "rollover", "readonly", "shrink", "forcemerge", "searchable_snapshot", "downsample"),
- },
- },
- "warm": {
- Description: "The index is no longer being updated but is still being queried.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- AtLeastOneOf: []string{"hot", "warm", "cold", "frozen", "delete"},
- Elem: &schema.Resource{
- Schema: getSchema("set_priority", "unfollow", "readonly", "allocate", "migrate", "shrink", "forcemerge", "downsample"),
- },
- },
- "cold": {
- Description: "The index is no longer being updated and is queried infrequently. The information still needs to be searchable, but it’s okay if those queries are slower.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- AtLeastOneOf: []string{"hot", "warm", "cold", "frozen", "delete"},
- Elem: &schema.Resource{
- Schema: getSchema("set_priority", "unfollow", "readonly", "searchable_snapshot", "allocate", "migrate", "freeze", "downsample"),
- },
- },
- "frozen": {
- Description: "The index is no longer being updated and is queried rarely. The information still needs to be searchable, but it’s okay if those queries are extremely slow.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- AtLeastOneOf: []string{"hot", "warm", "cold", "frozen", "delete"},
- Elem: &schema.Resource{
- Schema: getSchema("searchable_snapshot"),
- },
- },
- "delete": {
- Description: "The index is no longer needed and can safely be removed.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- AtLeastOneOf: []string{"hot", "warm", "cold", "frozen", "delete"},
- Elem: &schema.Resource{
- Schema: getSchema("wait_for_snapshot", "delete"),
- },
- },
- "modified_date": {
- Description: "The DateTime of the last modification.",
- Type: schema.TypeString,
- Computed: true,
- },
- }
-
- schemautil.AddConnectionSchema(ilmSchema)
-
- return &schema.Resource{
- Description: ilmResourceDescription,
-
- CreateContext: resourceIlmPut,
- UpdateContext: resourceIlmPut,
- ReadContext: resourceIlmRead,
- DeleteContext: resourceIlmDelete,
-
- Importer: &schema.ResourceImporter{
- StateContext: schema.ImportStatePassthroughContext,
- },
-
- Schema: ilmSchema,
- }
-}
-
-var supportedActions = map[string]*schema.Schema{
- "allocate": {
- Description: "Updates the index settings to change which nodes are allowed to host the index shards and change the number of replicas.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "number_of_replicas": {
- Description: "Number of replicas to assign to the index. Default: `0`",
- Type: schema.TypeInt,
- Optional: true,
- Default: 0,
- },
- "total_shards_per_node": {
- Description: "The maximum number of shards for the index on a single Elasticsearch node. Defaults to `-1` (unlimited). Supported from Elasticsearch version **7.16**",
- Type: schema.TypeInt,
- Optional: true,
- Default: -1,
- },
- "include": {
- Description: "Assigns an index to nodes that have at least one of the specified custom attributes. Must be valid JSON document.",
- Type: schema.TypeString,
- Optional: true,
- ValidateFunc: validation.StringIsJSON,
- DiffSuppressFunc: tfsdkutils.DiffJSONSuppress,
- Default: "{}",
- },
- "exclude": {
- Description: "Assigns an index to nodes that have none of the specified custom attributes. Must be valid JSON document.",
- Type: schema.TypeString,
- Optional: true,
- ValidateFunc: validation.StringIsJSON,
- DiffSuppressFunc: tfsdkutils.DiffJSONSuppress,
- Default: "{}",
- },
- "require": {
- Description: "Assigns an index to nodes that have all of the specified custom attributes. Must be valid JSON document.",
- Type: schema.TypeString,
- Optional: true,
- ValidateFunc: validation.StringIsJSON,
- DiffSuppressFunc: tfsdkutils.DiffJSONSuppress,
- Default: "{}",
- },
- },
- },
- },
- "delete": {
- Description: "Permanently removes the index.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "delete_searchable_snapshot": {
- Description: "Deletes the searchable snapshot created in a previous phase.",
- Type: schema.TypeBool,
- Optional: true,
- Default: true,
- },
- },
- },
- },
- "forcemerge": {
- Description: "Force merges the index into the specified maximum number of segments. This action makes the index read-only.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "max_num_segments": {
- Description: "Number of segments to merge to. To fully merge the index, set to 1.",
- Type: schema.TypeInt,
- Required: true,
- ValidateFunc: validation.IntAtLeast(1),
- },
- "index_codec": {
- Description: "Codec used to compress the document store.",
- Type: schema.TypeString,
- Optional: true,
- },
- },
- },
- },
- "freeze": {
- Description: "Freeze the index to minimize its memory footprint.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "enabled": {
- Description: "Controls whether ILM freezes the index.",
- Type: schema.TypeBool,
- Optional: true,
- Default: true,
- },
- },
- },
- },
- "migrate": {
- Description: `Moves the index to the data tier that corresponds to the current phase by updating the "index.routing.allocation.include._tier_preference" index setting.`,
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "enabled": {
- Description: "Controls whether ILM automatically migrates the index during this phase.",
- Type: schema.TypeBool,
- Optional: true,
- Default: true,
- },
- },
- },
- },
- "readonly": {
- Description: "Makes the index read-only.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "enabled": {
- Description: "Controls whether ILM makes the index read-only.",
- Type: schema.TypeBool,
- Optional: true,
- Default: true,
- },
- },
- },
- },
- "rollover": {
- Description: "Rolls over a target to a new index when the existing index meets one or more of the rollover conditions.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "max_age": {
- Description: "Triggers rollover after the maximum elapsed time from index creation is reached.",
- Type: schema.TypeString,
- Optional: true,
- },
- "max_docs": {
- Description: "Triggers rollover after the specified maximum number of documents is reached.",
- Type: schema.TypeInt,
- Optional: true,
- },
- "max_size": {
- Description: "Triggers rollover when the index reaches a certain size.",
- Type: schema.TypeString,
- Optional: true,
- },
- "max_primary_shard_docs": {
- Description: "Triggers rollover when the largest primary shard in the index reaches a certain number of documents. Supported from Elasticsearch version **8.2**",
- Type: schema.TypeInt,
- Optional: true,
- },
- "max_primary_shard_size": {
- Description: "Triggers rollover when the largest primary shard in the index reaches a certain size.",
- Type: schema.TypeString,
- Optional: true,
- },
- "min_age": {
- Description: "Prevents rollover until after the minimum elapsed time from index creation is reached. Supported from Elasticsearch version **8.4**",
- Type: schema.TypeString,
- Optional: true,
- },
- "min_docs": {
- Description: "Prevents rollover until after the specified minimum number of documents is reached. Supported from Elasticsearch version **8.4**",
- Type: schema.TypeInt,
- Optional: true,
- },
- "min_size": {
- Description: "Prevents rollover until the index reaches a certain size.",
- Type: schema.TypeString,
- Optional: true,
- },
- "min_primary_shard_docs": {
- Description: "Prevents rollover until the largest primary shard in the index reaches a certain number of documents. Supported from Elasticsearch version **8.4**",
- Type: schema.TypeInt,
- Optional: true,
- },
- "min_primary_shard_size": {
- Description: "Prevents rollover until the largest primary shard in the index reaches a certain size. Supported from Elasticsearch version **8.4**",
- Type: schema.TypeString,
- Optional: true,
- },
- },
- },
- },
- "searchable_snapshot": {
- Description: "Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "snapshot_repository": {
- Description: "Repository used to store the snapshot.",
- Type: schema.TypeString,
- Required: true,
- },
- "force_merge_index": {
- Description: "Force merges the managed index to one segment.",
- Type: schema.TypeBool,
- Optional: true,
- Default: true,
- },
- },
- },
- },
- "set_priority": {
- Description: ilmSetPriorityActionDescription,
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "priority": {
- Description: "The priority for the index. Must be 0 or greater.",
- Type: schema.TypeInt,
- Required: true,
- ValidateFunc: validation.IntAtLeast(0),
- },
- },
- },
- },
- "shrink": {
- Description: "Sets a source index to read-only and shrinks it into a new index with fewer primary shards.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "number_of_shards": {
- Description: "Number of shards to shrink to.",
- Type: schema.TypeInt,
- Optional: true,
- },
- "max_primary_shard_size": {
- Description: "The max primary shard size for the target index.",
- Type: schema.TypeString,
- Optional: true,
- },
- "allow_write_after_shrink": {
- Description: "If true, the shrunken index is made writable by removing the write block.",
- Type: schema.TypeBool,
- Optional: true,
- },
- },
- },
- },
- "unfollow": {
- Description: "Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "enabled": {
- Description: "Controls whether ILM makes the follower index a regular one.",
- Type: schema.TypeBool,
- Optional: true,
- Default: true,
- },
- },
- },
- },
- "wait_for_snapshot": {
- Description: "Waits for the specified SLM policy to be executed before removing the index. This ensures that a snapshot of the deleted index is available.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "policy": {
- Description: "Name of the SLM policy that the delete action should wait for.",
- Type: schema.TypeString,
- Required: true,
- },
- },
- },
- },
- "downsample": {
- Description: "Roll up documents within a fixed interval to a single summary document. Reduces the index footprint by storing time series data at reduced granularity.",
- Type: schema.TypeList,
- Optional: true,
- MaxItems: 1,
- Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{
- "fixed_interval": {
- Description: "Downsampling interval",
- Type: schema.TypeString,
- Required: true,
- },
- "wait_timeout": {
- Description: "Downsampling interval",
- Type: schema.TypeString,
- Optional: true,
- Computed: true,
- },
- },
- },
- },
-}
-
-func getSchema(actions ...string) map[string]*schema.Schema {
- sch := make(map[string]*schema.Schema)
- for _, a := range actions {
- if action, ok := supportedActions[a]; ok {
- sch[a] = action
- }
- }
- // min age can be set for all the phases
- sch["min_age"] = &schema.Schema{
- Description: "ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.",
- Type: schema.TypeString,
- Optional: true,
- Computed: true,
- }
- return sch
-}
-
-func resourceIlmPut(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
- client, diags := clients.NewAPIClientFromSDKResource(d, meta)
- if diags.HasError() {
- return diags
- }
- ilmID := d.Get("name").(string)
- id, diags := client.ID(ctx, ilmID)
- if diags.HasError() {
- return diags
- }
-
- serverVersion, diags := client.ServerVersion(ctx)
- if diags.HasError() {
- return diags
- }
-
- policy, diags := expandIlmPolicy(d, serverVersion)
- if diags.HasError() {
- return diags
- }
- policy.Name = ilmID
-
- if diags := elasticsearch.PutIlm(ctx, client, policy); diags.HasError() {
- return diags
- }
-
- d.SetId(id.String())
- return resourceIlmRead(ctx, d, meta)
-}
-
-func expandIlmPolicy(d *schema.ResourceData, serverVersion *version.Version) (*models.Policy, diag.Diagnostics) {
- var diags diag.Diagnostics
- var policy models.Policy
- phases := make(map[string]models.Phase)
-
- policy.Name = d.Get("name").(string)
-
- if v, ok := d.GetOk("metadata"); ok {
- metadata := make(map[string]any)
- if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil {
- return nil, diag.FromErr(err)
- }
- policy.Metadata = metadata
- }
-
- for _, ph := range supportedIlmPhases {
- if v, ok := d.GetOk(ph); ok {
- phase, diags := expandPhase(v.([]any)[0].(map[string]any), serverVersion)
- if diags.HasError() {
- return nil, diags
- }
- phases[ph] = *phase
- }
- }
-
- policy.Phases = phases
- return &policy, diags
-}
-
-func expandPhase(p map[string]any, serverVersion *version.Version) (*models.Phase, diag.Diagnostics) {
- var diags diag.Diagnostics
- var phase models.Phase
-
- if v := p["min_age"].(string); v != "" {
- phase.MinAge = v
- }
- delete(p, "min_age")
-
- actions := make(map[string]models.Action)
- for actionName, action := range p {
- if a := action.([]any); len(a) > 0 {
- switch actionName {
- case "allocate":
- actions[actionName], diags = expandAction(a, serverVersion, "number_of_replicas", "total_shards_per_node", "include", "exclude", "require")
- case "delete":
- actions[actionName], diags = expandAction(a, serverVersion, "delete_searchable_snapshot")
- case "forcemerge":
- actions[actionName], diags = expandAction(a, serverVersion, "max_num_segments", "index_codec")
- case "freeze":
- if a[0] != nil {
- ac := a[0].(map[string]any)
- if ac["enabled"].(bool) {
- actions[actionName], diags = expandAction(a, serverVersion)
- }
- }
- case "migrate":
- actions[actionName], diags = expandAction(a, serverVersion, "enabled")
- case "readonly":
- if a[0] != nil {
- ac := a[0].(map[string]any)
- if ac["enabled"].(bool) {
- actions[actionName], diags = expandAction(a, serverVersion)
- }
- }
- case "rollover":
- actions[actionName], diags = expandAction(
- a,
- serverVersion,
- "max_age",
- "max_docs",
- "max_size",
- "max_primary_shard_docs",
- "max_primary_shard_size",
- "min_age",
- "min_docs",
- "min_size",
- "min_primary_shard_docs",
- "min_primary_shard_size",
- )
- case "searchable_snapshot":
- actions[actionName], diags = expandAction(a, serverVersion, "snapshot_repository", "force_merge_index")
- case "set_priority":
- actions[actionName], diags = expandAction(a, serverVersion, "priority")
- case "shrink":
- actions[actionName], diags = expandAction(a, serverVersion, "number_of_shards", "max_primary_shard_size", "allow_write_after_shrink")
- case "unfollow":
- if a[0] != nil {
- ac := a[0].(map[string]any)
- if ac["enabled"].(bool) {
- actions[actionName], diags = expandAction(a, serverVersion)
- }
- }
- case "wait_for_snapshot":
- actions[actionName], diags = expandAction(a, serverVersion, "policy")
- case "downsample":
- actions[actionName], diags = expandAction(a, serverVersion, "fixed_interval", "wait_timeout")
- default:
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unknown action defined.",
- Detail: fmt.Sprintf(`Configured action "%s" is not supported`, actionName),
- })
- return nil, diags
- }
- }
- }
-
- phase.Actions = actions
- return &phase, diags
-}
-
-var (
- RolloverMinConditionsMinSupportedVersion = version.Must(version.NewVersion("8.4.0"))
- MaxPrimaryShardDocsMinSupportedVersion = version.Must(version.NewVersion("8.2.0"))
-)
-
-var ilmActionSettingOptions = map[string]struct {
- skipEmptyCheck bool
- def any
- minVersion *version.Version
-}{
- "allow_write_after_shrink": {def: false, minVersion: version.Must(version.NewVersion("8.14.0"))},
- "number_of_replicas": {skipEmptyCheck: true},
- "priority": {skipEmptyCheck: true},
- "max_primary_shard_docs": {def: 0, minVersion: MaxPrimaryShardDocsMinSupportedVersion},
- "min_age": {def: "", minVersion: RolloverMinConditionsMinSupportedVersion},
- "min_docs": {def: 0, minVersion: RolloverMinConditionsMinSupportedVersion},
- "min_size": {def: "", minVersion: RolloverMinConditionsMinSupportedVersion},
- "min_primary_shard_docs": {def: 0, minVersion: RolloverMinConditionsMinSupportedVersion},
- "min_primary_shard_size": {def: "", minVersion: RolloverMinConditionsMinSupportedVersion},
- "total_shards_per_node": {skipEmptyCheck: true, def: -1, minVersion: version.Must(version.NewVersion("7.16.0"))},
-}
-
-func expandAction(a []any, serverVersion *version.Version, settings ...string) (map[string]any, diag.Diagnostics) {
- var diags diag.Diagnostics
- def := make(map[string]any)
-
- if action := a[0]; action != nil {
- for _, setting := range settings {
- if v, ok := action.(map[string]any)[setting]; ok && v != nil {
- options := ilmActionSettingOptions[setting]
-
- if options.minVersion != nil && options.minVersion.GreaterThan(serverVersion) {
- if v != options.def {
- return nil, diag.Errorf("[%s] is not supported in the target Elasticsearch server. Remove the setting from your module definition or set it to the default [%s] value", setting, options.def)
- }
-
- // This setting is not supported, and shouldn't be set in the ILM policy object
- continue
- }
-
- if options.skipEmptyCheck || !schemautil.IsEmpty(v) {
- // these 3 fields must be treated as JSON objects
- if setting == "include" || setting == "exclude" || setting == "require" {
- res := make(map[string]any)
- if err := json.Unmarshal([]byte(v.(string)), &res); err != nil {
- return nil, diag.FromErr(err)
- }
- def[setting] = res
- } else {
- def[setting] = v
- }
- }
- }
- }
- }
- return def, diags
-}
-
-func resourceIlmRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
- client, diags := clients.NewAPIClientFromSDKResource(d, meta)
- if diags.HasError() {
- return diags
- }
-
- id := d.Id()
- compID, diags := clients.CompositeIDFromStr(id)
- if diags.HasError() {
- return diags
- }
- policyID := compID.ResourceID
-
- ilmDef, diags := elasticsearch.GetIlm(ctx, client, policyID)
- if ilmDef == nil && diags == nil {
- tflog.Warn(ctx, fmt.Sprintf(`ILM policy "%s" not found, removing from state`, compID.ResourceID))
- d.SetId("")
- return diags
- }
- if diags.HasError() {
- return diags
- }
-
- if err := d.Set("modified_date", ilmDef.Modified); err != nil {
- return diag.FromErr(err)
- }
- if ilmDef.Policy.Metadata != nil {
- metadata, err := json.Marshal(ilmDef.Policy.Metadata)
- if err != nil {
- return diag.FromErr(err)
- }
- if err := d.Set("metadata", string(metadata)); err != nil {
- return diag.FromErr(err)
- }
- }
- if err := d.Set("name", policyID); err != nil {
- return diag.FromErr(err)
- }
- for _, ph := range supportedIlmPhases {
- if v, ok := ilmDef.Policy.Phases[ph]; ok {
- phase, diags := flattenPhase(ph, v, d)
- if diags.HasError() {
- return diags
- }
- if err := d.Set(ph, phase); err != nil {
- return diag.FromErr(err)
- }
- }
- }
-
- return diags
-}
-
-func flattenPhase(phaseName string, p models.Phase, d *schema.ResourceData) (any, diag.Diagnostics) {
- var diags diag.Diagnostics
- out := make([]any, 1)
- phase := make(map[string]any)
- enabled := make(map[string]any)
- ns := make(map[string]any)
-
- _, phaseConfigNew := d.GetChange(phaseName)
-
- if phaseConfigNew != nil && len(phaseConfigNew.([]any)) > 0 {
- ns = phaseConfigNew.([]any)[0].(map[string]any)
- }
-
- existsAndNotEmpty := func(key string, m map[string]any) bool {
- if v, ok := m[key]; ok && len(v.([]any)) > 0 {
- return true
- }
- return false
- }
- for _, aCase := range []string{"readonly", "freeze", "unfollow"} {
- if existsAndNotEmpty(aCase, ns) {
- enabled["enabled"] = false
- phase[aCase] = []any{enabled}
- }
- }
-
- if p.MinAge != "" {
- phase["min_age"] = p.MinAge
- }
- for actionName, action := range p.Actions {
- switch actionName {
- case "readonly", "freeze", "unfollow":
- enabled["enabled"] = true
- phase[actionName] = []any{enabled}
- case "allocate":
- allocateAction := make(map[string]any)
- if v, ok := action["number_of_replicas"]; ok {
- allocateAction["number_of_replicas"] = v
- }
- if v, ok := action["total_shards_per_node"]; ok {
- allocateAction["total_shards_per_node"] = v
- } else {
- // Specify the default for total_shards_per_node. This avoids an endless diff loop for ES 7.15 or lower which don't support this setting
- allocateAction["total_shards_per_node"] = -1
- }
- for _, f := range []string{"include", "require", "exclude"} {
- if v, ok := action[f]; ok {
- res, err := json.Marshal(v)
- if err != nil {
- return nil, diag.FromErr(err)
- }
- allocateAction[f] = string(res)
- }
- }
- phase[actionName] = []any{allocateAction}
- default:
- phase[actionName] = []any{action}
- }
- }
- out[0] = phase
- return out, diags
-}
-
-func resourceIlmDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
- client, diags := clients.NewAPIClientFromSDKResource(d, meta)
- if diags.HasError() {
- return diags
- }
-
- id := d.Id()
- compID, diags := clients.CompositeIDFromStr(id)
- if diags.HasError() {
- return diags
- }
-
- if diags := elasticsearch.DeleteIlm(ctx, client, compID.ResourceID); diags.HasError() {
- return diags
- }
-
- return diags
-}
diff --git a/internal/elasticsearch/index/ilm_test.go b/internal/elasticsearch/index/ilm/acc_test.go
similarity index 56%
rename from internal/elasticsearch/index/ilm_test.go
rename to internal/elasticsearch/index/ilm/acc_test.go
index a7135f6a2..6b588fd05 100644
--- a/internal/elasticsearch/index/ilm_test.go
+++ b/internal/elasticsearch/index/ilm/acc_test.go
@@ -15,15 +15,16 @@
// specific language governing permissions and limitations
// under the License.
-package index_test
+package ilm_test
import (
+ _ "embed"
"fmt"
"testing"
"github.com/elastic/terraform-provider-elasticstack/internal/acctest"
"github.com/elastic/terraform-provider-elasticstack/internal/clients"
- "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index"
+ "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/ilm"
"github.com/elastic/terraform-provider-elasticstack/internal/versionutils"
"github.com/hashicorp/go-version"
"github.com/hashicorp/terraform-plugin-testing/config"
@@ -35,9 +36,9 @@ import (
var totalShardsPerNodeVersionLimit = version.Must(version.NewVersion("7.16.0"))
var downsampleNoTimeoutVersionLimit = version.Must(version.NewVersion("8.5.0"))
var downsampleVersionLimit = version.Must(version.NewVersion("8.10.0"))
+var shrinkAllowWriteVersionLimit = version.Must(version.NewVersion("8.14.0"))
func TestAccResourceILM(t *testing.T) {
- // generate a random policy name
policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
resource.Test(t, resource.TestCase{
@@ -52,17 +53,17 @@ func TestAccResourceILM(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.min_age", "1h"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.set_priority.0.priority", "10"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.rollover.0.max_age", "1d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.readonly.0.enabled", "true"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.0.min_age", "0ms"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.0.delete.0.delete_searchable_snapshot", "true"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.#", "0"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.set_priority.priority", "10"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.rollover.max_age", "1d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.readonly.enabled", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age", "0ms"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.delete.delete_searchable_snapshot", "true"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.min_age"),
resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "modified_date"),
),
},
@@ -74,22 +75,21 @@ func TestAccResourceILM(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.min_age", "1h"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.set_priority.0.priority", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.rollover.0.max_age", "2d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.readonly.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.min_age", "0ms"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.set_priority.0.priority", "60"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.readonly.0.enabled", "true"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.allocate.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.allocate.0.number_of_replicas", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.allocate.0.exclude", `{"box_type":"hot"}`),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.shrink.0.max_primary_shard_size", "50gb"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.#", "0"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.set_priority.priority", "0"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.rollover.max_age", "2d"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.readonly.enabled"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age", "0ms"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.set_priority.priority", "60"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.readonly.enabled", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.allocate.number_of_replicas", "1"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.allocate.exclude", `{"box_type":"hot"}`),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.shrink.max_primary_shard_size", "50gb"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.min_age"),
resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "modified_date"),
),
},
@@ -102,13 +102,12 @@ func TestAccResourceILM(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.min_age", "0ms"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.set_priority.0.priority", "60"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.readonly.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.allocate.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.allocate.0.number_of_replicas", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.0.allocate.0.total_shards_per_node", "200"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age", "0ms"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.set_priority.priority", "60"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.readonly.enabled", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.allocate.number_of_replicas", "1"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.allocate.total_shards_per_node", "200"),
),
},
{
@@ -120,17 +119,17 @@ func TestAccResourceILM(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.min_age", "1h"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.set_priority.0.priority", "10"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.downsample.0.fixed_interval", "1d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.rollover.0.max_age", "1d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.readonly.0.enabled", "true"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.0.min_age", "0ms"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.#", "0"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.set_priority.priority", "10"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.downsample.fixed_interval", "1d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.rollover.max_age", "1d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.readonly.enabled", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age", "0ms"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.min_age"),
),
},
{
@@ -142,18 +141,18 @@ func TestAccResourceILM(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.min_age", "1h"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.set_priority.0.priority", "10"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.downsample.0.fixed_interval", "1d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.downsample.0.wait_timeout", "1d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.rollover.0.max_age", "1d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.readonly.0.enabled", "true"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.0.min_age", "0ms"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.#", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.#", "0"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.set_priority.priority", "10"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.downsample.fixed_interval", "1d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.downsample.wait_timeout", "1d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.rollover.max_age", "1d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.readonly.enabled", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age", "0ms"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test", "delete.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "warm.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "cold.min_age"),
+ resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "frozen.min_age"),
),
},
},
@@ -161,7 +160,6 @@ func TestAccResourceILM(t *testing.T) {
}
func TestAccResourceILMRolloverConditions(t *testing.T) {
- // generate a random policy name
policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
resource.Test(t, resource.TestCase{
@@ -170,35 +168,35 @@ func TestAccResourceILMRolloverConditions(t *testing.T) {
Steps: []resource.TestStep{
{
ProtoV6ProviderFactories: acctest.Providers,
- SkipFunc: versionutils.CheckIfVersionIsUnsupported(index.MaxPrimaryShardDocsMinSupportedVersion),
+ SkipFunc: versionutils.CheckIfVersionIsUnsupported(ilm.MaxPrimaryShardDocsMinSupportedVersion),
ConfigDirectory: acctest.NamedTestCaseDirectory("max_primary_shard_docs"),
ConfigVariables: config.Variables{
"policy_name": config.StringVariable(policyName),
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.max_primary_shard_docs", "5000"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.max_primary_shard_docs", "5000"),
),
},
{
ProtoV6ProviderFactories: acctest.Providers,
- SkipFunc: versionutils.CheckIfVersionIsUnsupported(index.RolloverMinConditionsMinSupportedVersion),
+ SkipFunc: versionutils.CheckIfVersionIsUnsupported(ilm.RolloverMinConditionsMinSupportedVersion),
ConfigDirectory: acctest.NamedTestCaseDirectory("rollover_conditions"),
ConfigVariables: config.Variables{
"policy_name": config.StringVariable(policyName),
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.max_age", "7d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.max_docs", "10000"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.max_size", "100gb"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.max_primary_shard_docs", "5000"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.max_primary_shard_size", "50gb"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.min_age", "3d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.min_docs", "1000"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.min_size", "50gb"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.min_primary_shard_docs", "500"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.0.rollover.0.min_primary_shard_size", "25gb"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.max_age", "7d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.max_docs", "10000"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.max_size", "100gb"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.max_primary_shard_docs", "5000"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.max_primary_shard_size", "50gb"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.min_age", "3d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.min_docs", "1000"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.min_size", "50gb"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.min_primary_shard_docs", "500"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_rollover", "hot.rollover.min_primary_shard_size", "25gb"),
),
},
},
@@ -286,14 +284,12 @@ func TestAccResourceILMColdPhase(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.min_age", "30d"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.set_priority.0.priority", "0"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.readonly.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.readonly.0.enabled", "true"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.allocate.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.allocate.0.number_of_replicas", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.0.allocate.0.include", `{"box_type":"cold"}`),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.min_age"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.min_age", "30d"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.set_priority.priority", "0"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.readonly.enabled", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.allocate.number_of_replicas", "1"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_cold", "cold.allocate.include", `{"box_type":"cold"}`),
resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test_cold", "modified_date"),
),
},
@@ -316,13 +312,157 @@ func TestAccResourceILMForcemerge(t *testing.T) {
},
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "name", policyName),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.0.forcemerge.#", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.0.forcemerge.0.max_num_segments", "1"),
- resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.0.forcemerge.0.index_codec", "best_compression"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.min_age"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.forcemerge.max_num_segments", "1"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "warm.forcemerge.index_codec", "best_compression"),
resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_index_lifecycle.test_forcemerge", "modified_date"),
),
},
},
})
}
+
+//go:embed testdata/TestAccResourceILMFromSDK/create/resource.tf
+var sdkILMCreateConfig string
+
+func TestAccResourceILMFromSDK(t *testing.T) {
+ policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ CheckDestroy: checkResourceILMDestroy,
+ Steps: []resource.TestStep{
+ {
+ ExternalProviders: map[string]resource.ExternalProvider{
+ "elasticstack": {
+ Source: "elastic/elasticstack",
+ VersionConstraint: "0.14.3",
+ },
+ },
+ Config: sdkILMCreateConfig,
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
+				// The SDK-based provider (versions < 1.0) stores nested blocks as lists in state, so attributes are indexed (hot.0.*).
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.min_age", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.0.set_priority.0.priority", "10"),
+ ),
+ },
+ {
+ ProtoV6ProviderFactories: acctest.Providers,
+ ConfigDirectory: acctest.NamedTestCaseDirectory("create"),
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "name", policyName),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.min_age", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test", "hot.set_priority.priority", "10"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccResourceILM_importBasic(t *testing.T) {
+ policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ CheckDestroy: checkResourceILMDestroy,
+ Steps: []resource.TestStep{
+ {
+ ProtoV6ProviderFactories: acctest.Providers,
+ ConfigDirectory: acctest.NamedTestCaseDirectory("create"),
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ },
+ {
+ ProtoV6ProviderFactories: acctest.Providers,
+ ConfigDirectory: acctest.NamedTestCaseDirectory("create"),
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ ResourceName: "elasticstack_elasticsearch_index_lifecycle.test",
+ ImportState: true,
+ ImportStateIdFunc: func(s *terraform.State) (string, error) {
+ rs, ok := s.RootModule().Resources["elasticstack_elasticsearch_index_lifecycle.test"]
+ if !ok {
+ return "", fmt.Errorf("expected resource not found")
+ }
+ return rs.Primary.ID, nil
+ },
+ },
+ },
+ })
+}
+
+func TestAccResourceILM_warmMigrateDisabled(t *testing.T) {
+ policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ CheckDestroy: checkResourceILMDestroy,
+ Steps: []resource.TestStep{
+ {
+ ProtoV6ProviderFactories: acctest.Providers,
+ ConfigDirectory: acctest.NamedTestCaseDirectory("migrate_warm"),
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_migrate", "name", policyName),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_migrate", "warm.migrate.enabled", "false"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccResourceILM_shrinkAllowWriteAfterShrink(t *testing.T) {
+ policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ CheckDestroy: checkResourceILMDestroy,
+ Steps: []resource.TestStep{
+ {
+ ProtoV6ProviderFactories: acctest.Providers,
+ SkipFunc: versionutils.CheckIfVersionIsUnsupported(shrinkAllowWriteVersionLimit),
+ ConfigDirectory: acctest.NamedTestCaseDirectory("shrink_write"),
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_shrink", "name", policyName),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_shrink", "hot.shrink.allow_write_after_shrink", "true"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccResourceILM_hotReadonlyDisabled(t *testing.T) {
+ policyName := sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ CheckDestroy: checkResourceILMDestroy,
+ Steps: []resource.TestStep{
+ {
+ ProtoV6ProviderFactories: acctest.Providers,
+ ConfigDirectory: acctest.NamedTestCaseDirectory("readonly_disabled"),
+ ConfigVariables: config.Variables{
+ "policy_name": config.StringVariable(policyName),
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_ro", "name", policyName),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_index_lifecycle.test_ro", "hot.readonly.enabled", "false"),
+ ),
+ },
+ },
+ })
+}
diff --git a/internal/elasticsearch/index/ilm/attr_types.go b/internal/elasticsearch/index/ilm/attr_types.go
new file mode 100644
index 000000000..29609919e
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/attr_types.go
@@ -0,0 +1,235 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func setPriorityObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "priority": types.Int64Type,
+ },
+ }
+}
+
+func unfollowObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "enabled": types.BoolType,
+ },
+ }
+}
+
+func rolloverObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "max_age": types.StringType,
+ "max_docs": types.Int64Type,
+ "max_size": types.StringType,
+ "max_primary_shard_docs": types.Int64Type,
+ "max_primary_shard_size": types.StringType,
+ "min_age": types.StringType,
+ "min_docs": types.Int64Type,
+ "min_size": types.StringType,
+ "min_primary_shard_docs": types.Int64Type,
+ "min_primary_shard_size": types.StringType,
+ },
+ }
+}
+
+func readonlyObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "enabled": types.BoolType,
+ },
+ }
+}
+
+func shrinkObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "number_of_shards": types.Int64Type,
+ "max_primary_shard_size": types.StringType,
+ "allow_write_after_shrink": types.BoolType,
+ },
+ }
+}
+
+func forcemergeObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "max_num_segments": types.Int64Type,
+ "index_codec": types.StringType,
+ },
+ }
+}
+
+func searchableSnapshotObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "snapshot_repository": types.StringType,
+ "force_merge_index": types.BoolType,
+ },
+ }
+}
+
+func downsampleObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "fixed_interval": types.StringType,
+ "wait_timeout": types.StringType,
+ },
+ }
+}
+
+func allocateObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "number_of_replicas": types.Int64Type,
+ "total_shards_per_node": types.Int64Type,
+ "include": jsontypes.NormalizedType{},
+ "exclude": jsontypes.NormalizedType{},
+ "require": jsontypes.NormalizedType{},
+ },
+ }
+}
+
+func migrateObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "enabled": types.BoolType,
+ },
+ }
+}
+
+func freezeObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "enabled": types.BoolType,
+ },
+ }
+}
+
+func deleteActionObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "delete_searchable_snapshot": types.BoolType,
+ },
+ }
+}
+
+func waitForSnapshotObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "policy": types.StringType,
+ },
+ }
+}
+
+func hotPhaseObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "min_age": types.StringType,
+ "set_priority": setPriorityObjectType(),
+ "unfollow": unfollowObjectType(),
+ "rollover": rolloverObjectType(),
+ "readonly": readonlyObjectType(),
+ "shrink": shrinkObjectType(),
+ "forcemerge": forcemergeObjectType(),
+ "searchable_snapshot": searchableSnapshotObjectType(),
+ "downsample": downsampleObjectType(),
+ },
+ }
+}
+
+func warmPhaseObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "min_age": types.StringType,
+ "set_priority": setPriorityObjectType(),
+ "unfollow": unfollowObjectType(),
+ "readonly": readonlyObjectType(),
+ "allocate": allocateObjectType(),
+ "migrate": migrateObjectType(),
+ "shrink": shrinkObjectType(),
+ "forcemerge": forcemergeObjectType(),
+ "downsample": downsampleObjectType(),
+ },
+ }
+}
+
+func coldPhaseObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "min_age": types.StringType,
+ "set_priority": setPriorityObjectType(),
+ "unfollow": unfollowObjectType(),
+ "readonly": readonlyObjectType(),
+ "searchable_snapshot": searchableSnapshotObjectType(),
+ "allocate": allocateObjectType(),
+ "migrate": migrateObjectType(),
+ "freeze": freezeObjectType(),
+ "downsample": downsampleObjectType(),
+ },
+ }
+}
+
+func frozenPhaseObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "min_age": types.StringType,
+ "searchable_snapshot": searchableSnapshotObjectType(),
+ },
+ }
+}
+
+func deletePhaseObjectType() types.ObjectType {
+ return types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "min_age": types.StringType,
+ "wait_for_snapshot": waitForSnapshotObjectType(),
+ ilmPhaseDelete: deleteActionObjectType(),
+ },
+ }
+}
+
+func phaseObjectType(phaseName string) types.ObjectType {
+ switch phaseName {
+ case ilmPhaseHot:
+ return hotPhaseObjectType()
+ case ilmPhaseWarm:
+ return warmPhaseObjectType()
+ case ilmPhaseCold:
+ return coldPhaseObjectType()
+ case ilmPhaseFrozen:
+ return frozenPhaseObjectType()
+ case ilmPhaseDelete:
+ return deletePhaseObjectType()
+ default:
+ return types.ObjectType{AttrTypes: map[string]attr.Type{}}
+ }
+}
+
+func phaseObjectNull(phaseName string) types.Object {
+ ot := phaseObjectType(phaseName)
+ return types.ObjectNull(ot.AttrTypes)
+}
diff --git a/internal/elasticsearch/index/ilm/create.go b/internal/elasticsearch/index/ilm/create.go
new file mode 100644
index 000000000..eda256855
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/create.go
@@ -0,0 +1,81 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch"
+ "github.com/elastic/terraform-provider-elasticstack/internal/diagutil"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ var plan tfModel
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ client, diags := clients.MaybeNewAPIClientFromFrameworkResource(ctx, plan.ElasticsearchConnection, r.client)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ sv, diags := serverVersionFW(ctx, client)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ policy, diags := policyFromModel(ctx, &plan, sv)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ policy.Name = plan.Name.ValueString()
+
+ resp.Diagnostics.Append(elasticsearch.PutIlm(ctx, client, policy)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ id, sdkDiags := client.ID(ctx, plan.Name.ValueString())
+ resp.Diagnostics.Append(diagutil.FrameworkDiagsFromSDK(sdkDiags)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ prior := plan
+ prior.ID = types.StringValue(id.String())
+
+ out, diags := readFull(ctx, client, plan.Name.ValueString(), &prior)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ if out == nil {
+ resp.Diagnostics.AddError("ILM policy missing after create", plan.Name.ValueString())
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.Set(ctx, out)...)
+}
diff --git a/internal/elasticsearch/index/ilm/delete.go b/internal/elasticsearch/index/ilm/delete.go
new file mode 100644
index 000000000..041b279b5
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/delete.go
@@ -0,0 +1,48 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+func (r *Resource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ var state tfModel
+ resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ compID, diags := clients.CompositeIDFromStrFw(state.ID.ValueString())
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ client, diags := clients.MaybeNewAPIClientFromFrameworkResource(ctx, state.ElasticsearchConnection, r.client)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ resp.Diagnostics.Append(elasticsearch.DeleteIlm(ctx, client, compID.ResourceID)...)
+}
diff --git a/internal/elasticsearch/index/descriptions/ilm_resource.md b/internal/elasticsearch/index/ilm/descriptions/ilm_resource.md
similarity index 100%
rename from internal/elasticsearch/index/descriptions/ilm_resource.md
rename to internal/elasticsearch/index/ilm/descriptions/ilm_resource.md
diff --git a/internal/elasticsearch/index/descriptions/ilm_set_priority_action.md b/internal/elasticsearch/index/ilm/descriptions/ilm_set_priority_action.md
similarity index 100%
rename from internal/elasticsearch/index/descriptions/ilm_set_priority_action.md
rename to internal/elasticsearch/index/ilm/descriptions/ilm_set_priority_action.md
diff --git a/internal/elasticsearch/index/ilm/expand.go b/internal/elasticsearch/index/ilm/expand.go
new file mode 100644
index 000000000..359a27415
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/expand.go
@@ -0,0 +1,198 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/models"
+ schemautil "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+)
+
+var ilmActionSettingOptions = map[string]struct {
+ skipEmptyCheck bool
+ def any
+ minVersion *version.Version
+}{
+ "allow_write_after_shrink": {def: false, minVersion: version.Must(version.NewVersion("8.14.0"))},
+ "number_of_replicas": {skipEmptyCheck: true},
+ "priority": {skipEmptyCheck: true},
+ "max_primary_shard_docs": {def: 0, minVersion: MaxPrimaryShardDocsMinSupportedVersion},
+ "min_age": {def: "", minVersion: RolloverMinConditionsMinSupportedVersion},
+ "min_docs": {def: 0, minVersion: RolloverMinConditionsMinSupportedVersion},
+ "min_size": {def: "", minVersion: RolloverMinConditionsMinSupportedVersion},
+ "min_primary_shard_docs": {def: 0, minVersion: RolloverMinConditionsMinSupportedVersion},
+ "min_primary_shard_size": {def: "", minVersion: RolloverMinConditionsMinSupportedVersion},
+ "total_shards_per_node": {skipEmptyCheck: true, def: -1, minVersion: version.Must(version.NewVersion("7.16.0"))},
+}
+
+func expandPhase(p map[string]any, serverVersion *version.Version) (*models.Phase, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ var phase models.Phase
+
+ if v, ok := p["min_age"].(string); ok && v != "" {
+ phase.MinAge = v
+ }
+ delete(p, "min_age")
+
+ actions := make(map[string]models.Action)
+ for actionName, action := range p {
+ if a := action.([]any); len(a) > 0 {
+ switch actionName {
+ case "allocate":
+ actions[actionName], diags = expandAction(a, serverVersion, "number_of_replicas", "total_shards_per_node", "include", "exclude", "require")
+ case ilmPhaseDelete:
+ actions[actionName], diags = expandAction(a, serverVersion, "delete_searchable_snapshot")
+ case "forcemerge":
+ actions[actionName], diags = expandAction(a, serverVersion, "max_num_segments", "index_codec")
+ case "freeze":
+ if a[0] != nil {
+ ac := a[0].(map[string]any)
+ if ac["enabled"].(bool) {
+ actions[actionName], diags = expandAction(a, serverVersion)
+ }
+ }
+ case "migrate":
+ actions[actionName], diags = expandAction(a, serverVersion, "enabled")
+ case "readonly":
+ if a[0] != nil {
+ ac := a[0].(map[string]any)
+ if ac["enabled"].(bool) {
+ actions[actionName], diags = expandAction(a, serverVersion)
+ }
+ }
+ case "rollover":
+ actions[actionName], diags = expandAction(
+ a,
+ serverVersion,
+ "max_age",
+ "max_docs",
+ "max_size",
+ "max_primary_shard_docs",
+ "max_primary_shard_size",
+ "min_age",
+ "min_docs",
+ "min_size",
+ "min_primary_shard_docs",
+ "min_primary_shard_size",
+ )
+ case "searchable_snapshot":
+ actions[actionName], diags = expandAction(a, serverVersion, "snapshot_repository", "force_merge_index")
+ case "set_priority":
+ actions[actionName], diags = expandAction(a, serverVersion, "priority")
+ case "shrink":
+ actions[actionName], diags = expandAction(a, serverVersion, "number_of_shards", "max_primary_shard_size", "allow_write_after_shrink")
+ case "unfollow":
+ if a[0] != nil {
+ ac := a[0].(map[string]any)
+ if ac["enabled"].(bool) {
+ actions[actionName], diags = expandAction(a, serverVersion)
+ }
+ }
+ case "wait_for_snapshot":
+ actions[actionName], diags = expandAction(a, serverVersion, "policy")
+ case "downsample":
+ actions[actionName], diags = expandAction(a, serverVersion, "fixed_interval", "wait_timeout")
+ default:
+ diags.AddError("Unknown action defined.", fmt.Sprintf(`Configured action "%s" is not supported`, actionName))
+ return nil, diags
+ }
+ }
+ if diags.HasError() {
+ return nil, diags
+ }
+ }
+
+ phase.Actions = actions
+ return &phase, diags
+}
+
+func expandAction(a []any, serverVersion *version.Version, settings ...string) (map[string]any, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ def := make(map[string]any)
+
+ if action := a[0]; action != nil {
+ for _, setting := range settings {
+ if v, ok := action.(map[string]any)[setting]; ok && v != nil {
+ options := ilmActionSettingOptions[setting]
+
+ if options.minVersion != nil && options.minVersion.GreaterThan(serverVersion) {
+ if v != options.def {
+ var unsupported diag.Diagnostics
+ unsupported.AddError(
+ "Unsupported ILM setting",
+ fmt.Sprintf("[%s] is not supported in the target Elasticsearch server. Remove the setting from your module definition or set it to the default [%v] value", setting, options.def),
+ )
+ return nil, unsupported
+ }
+ continue
+ }
+
+ if options.skipEmptyCheck || !schemautil.IsEmpty(v) {
+ if setting == "include" || setting == "exclude" || setting == "require" {
+ res := make(map[string]any)
+ if err := json.Unmarshal([]byte(v.(string)), &res); err != nil {
+ diags.AddError("Invalid JSON", err.Error())
+ return nil, diags
+ }
+ def[setting] = res
+ } else {
+ def[setting] = v
+ }
+ }
+ }
+ }
+ }
+ return def, diags
+}
+
+func expandIlmPolicy(name string, metadata string, phases map[string]map[string]any, serverVersion *version.Version) (*models.Policy, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ var policy models.Policy
+
+ policy.Name = name
+
+ if strings.TrimSpace(metadata) != "" {
+ meta := make(map[string]any)
+ if err := json.NewDecoder(strings.NewReader(metadata)).Decode(&meta); err != nil {
+ diags.AddError("Invalid metadata JSON", err.Error())
+ return nil, diags
+ }
+ policy.Metadata = meta
+ }
+
+ outPhases := make(map[string]models.Phase)
+ for ph, raw := range phases {
+ if raw == nil {
+ continue
+ }
+ phase, d := expandPhase(raw, serverVersion)
+ diags.Append(d...)
+ if diags.HasError() {
+ return nil, diags
+ }
+ outPhases[ph] = *phase
+ }
+
+ policy.Phases = outPhases
+ return &policy, diags
+}
diff --git a/internal/elasticsearch/index/ilm/flatten.go b/internal/elasticsearch/index/ilm/flatten.go
new file mode 100644
index 000000000..e31382b66
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/flatten.go
@@ -0,0 +1,102 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+ "encoding/json"
+ "maps"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/models"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// priorHasDeclaredToggle reports whether the prior state/config phase object
+// has a non-null, known nested object for the given toggle action (e.g.
+// "readonly", "freeze", "unfollow"). Used by flattenPhase to decide whether a
+// toggle absent from the API response should still be rendered (as disabled)
+// to match what the practitioner declared.
+func priorHasDeclaredToggle(_ context.Context, prior types.Object, toggle string) bool {
+	if prior.IsNull() || prior.IsUnknown() {
+		return false
+	}
+	attrVals := prior.Attributes()
+	v, ok := attrVals[toggle]
+	if !ok || v.IsNull() || v.IsUnknown() {
+		return false
+	}
+	// The toggle must itself be an object attribute; anything else does not
+	// count as a declared toggle block.
+	objV, ok := v.(types.Object)
+	if !ok {
+		return false
+	}
+	return !objV.IsNull() && !objV.IsUnknown()
+}
+
+// flattenPhase converts an API-side models.Phase back into a framework object
+// value for the named phase. prior (the corresponding phase object from prior
+// state/config) is consulted so that toggle actions the user declared but the
+// server no longer reports are flattened as enabled=false instead of being
+// dropped, keeping plans stable.
+func flattenPhase(ctx context.Context, phaseName string, p models.Phase, prior types.Object) (types.Object, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	phase := make(map[string]any)
+
+	// Pre-seed declared toggles as disabled; the actions loop below overwrites
+	// them with enabled=true when the server actually reports the action.
+	for _, aCase := range []string{"readonly", "freeze", "unfollow"} {
+		if priorHasDeclaredToggle(ctx, prior, aCase) {
+			phase[aCase] = []any{map[string]any{"enabled": false}}
+		}
+	}
+
+	if p.MinAge != "" {
+		phase["min_age"] = p.MinAge
+	}
+	for actionName, action := range p.Actions {
+		switch actionName {
+		case "readonly", "freeze", "unfollow":
+			// Toggle actions carry no settings; presence means enabled.
+			phase[actionName] = []any{map[string]any{"enabled": true}}
+		case "allocate":
+			allocateAction := make(map[string]any)
+			if v, ok := action["number_of_replicas"]; ok {
+				allocateAction["number_of_replicas"] = v
+			}
+			if v, ok := action["total_shards_per_node"]; ok {
+				allocateAction["total_shards_per_node"] = v
+			} else {
+				// Mirror the schema default so a server response omitting the
+				// field does not produce a perpetual diff.
+				allocateAction["total_shards_per_node"] = int64(-1)
+			}
+			// Allocation filters are stored as JSON strings in state.
+			for _, f := range []string{"include", "require", "exclude"} {
+				if v, ok := action[f]; ok {
+					res, err := json.Marshal(v)
+					if err != nil {
+						diags.AddError("Failed to marshal allocate filter", err.Error())
+						return types.ObjectUnknown(phaseObjectType(phaseName).AttrTypes), diags
+					}
+					s := string(res)
+					// Omit empty objects so unset optional JSON attrs stay null (matches config).
+					if s != "{}" {
+						allocateAction[f] = s
+					}
+				}
+			}
+			phase[actionName] = []any{allocateAction}
+		case "shrink":
+			shrinkAction := make(map[string]any, len(action)+1)
+			maps.Copy(shrinkAction, map[string]any(action))
+			// Backfill the schema default when the server omits the flag.
+			if _, ok := shrinkAction["allow_write_after_shrink"]; !ok {
+				shrinkAction["allow_write_after_shrink"] = false
+			}
+			phase[actionName] = []any{shrinkAction}
+		default:
+			// models.Action is a named map type; type assertions expect map[string]any.
+			phase[actionName] = []any{map[string]any(action)}
+		}
+	}
+
+	return phaseMapToObjectValue(ctx, phaseName, phase)
+}
diff --git a/internal/elasticsearch/index/ilm/model_expand.go b/internal/elasticsearch/index/ilm/model_expand.go
new file mode 100644
index 000000000..9f5e8b652
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/model_expand.go
@@ -0,0 +1,126 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// phaseObjectToExpandMap converts a framework phase object into the generic
+// map shape consumed by the expand layer. Returns (nil, no diags) for a
+// null/unknown phase so unconfigured phases are simply skipped. Allocate JSON
+// filter defaults ("{}") are applied to match legacy SDK behavior.
+func phaseObjectToExpandMap(ctx context.Context, phaseObj types.Object) (map[string]any, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	if phaseObj.IsNull() || phaseObj.IsUnknown() {
+		return nil, diags
+	}
+	m, d := objectToExpandMap(ctx, phaseObj)
+	diags.Append(d...)
+	if diags.HasError() {
+		return nil, diags
+	}
+	applyAllocateJSONDefaults(m)
+	return m, diags
+}
+
+// applyAllocateJSONDefaults mirrors SDK defaults for allocate JSON strings ({}).
+// It mutates phase in place: when an "allocate" action is present, any missing
+// include/exclude/require filter is filled with the empty JSON object "{}".
+// Each early return guards against a shape the expand map should not produce.
+func applyAllocateJSONDefaults(phase map[string]any) {
+	allocRaw, ok := phase["allocate"]
+	if !ok {
+		return
+	}
+	// Nested blocks are represented as single-element []any slices.
+	allocList, ok := allocRaw.([]any)
+	if !ok || len(allocList) == 0 {
+		return
+	}
+	am, ok := allocList[0].(map[string]any)
+	if !ok {
+		return
+	}
+	for _, k := range []string{"include", "exclude", "require"} {
+		if _, has := am[k]; !has {
+			am[k] = "{}"
+		}
+	}
+}
+
+// objectToExpandMap converts an object's attributes into a plain
+// map[string]any, dropping null/unknown attributes so only values the user
+// actually set reach the expand layer. Conversion of individual values is
+// delegated to attrValueToExpandRaw.
+func objectToExpandMap(ctx context.Context, obj types.Object) (map[string]any, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	out := make(map[string]any)
+	for k, v := range obj.Attributes() {
+		if v.IsNull() || v.IsUnknown() {
+			continue
+		}
+		raw, d := attrValueToExpandRaw(ctx, v)
+		diags.Append(d...)
+		if diags.HasError() {
+			return nil, diags
+		}
+		// attrValueToExpandRaw returns nil for empty/unsupported values; omit
+		// those keys entirely.
+		if raw != nil {
+			out[k] = raw
+		}
+	}
+	return out, diags
+}
+
+// attrValueToExpandRaw converts a single framework attr.Value into the raw Go
+// value shape used by the expand layer: primitives pass through, nested
+// objects and single-element lists become []any{map[string]any{...}} (the
+// legacy SDK block representation). Unsupported value types yield nil with no
+// diagnostics, which callers treat as "omit this key".
+func attrValueToExpandRaw(ctx context.Context, v attr.Value) (any, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	switch tv := v.(type) {
+	case types.String:
+		return tv.ValueString(), diags
+	case types.Int64:
+		return tv.ValueInt64(), diags
+	case types.Bool:
+		return tv.ValueBool(), diags
+	case jsontypes.Normalized:
+		// Defensive: callers already skip null values, but a typed-null
+		// Normalized still answers false to the interface nil checks.
+		if tv.IsNull() {
+			return nil, diags
+		}
+		return tv.ValueString(), diags
+	case types.Object:
+		if tv.IsNull() || tv.IsUnknown() {
+			return nil, diags
+		}
+		m, d := objectToExpandMap(ctx, tv)
+		diags.Append(d...)
+		if diags.HasError() {
+			return nil, diags
+		}
+		// Wrap in a one-element list to mimic the SDK block shape.
+		return []any{m}, diags
+	case types.List:
+		if len(tv.Elements()) == 0 {
+			return nil, diags
+		}
+		// Only the first element is used; ILM blocks are MaxItems:1 lists.
+		elem := tv.Elements()[0]
+		innerObj, ok := elem.(types.Object)
+		if !ok {
+			diags.AddError("Internal error", "expected object inside list")
+			return nil, diags
+		}
+		m, d := objectToExpandMap(ctx, innerObj)
+		diags.Append(d...)
+		if diags.HasError() {
+			return nil, diags
+		}
+		return []any{m}, diags
+	default:
+		return nil, diags
+	}
+}
diff --git a/internal/elasticsearch/index/ilm/models.go b/internal/elasticsearch/index/ilm/models.go
new file mode 100644
index 000000000..524fd8819
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/models.go
@@ -0,0 +1,53 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// tfModel is the terraform-plugin-framework state/plan model for the
+// elasticsearch_index_lifecycle resource. Phase blocks (hot/warm/cold/frozen/
+// delete) are SingleNestedBlock objects; metadata is normalized JSON.
+type tfModel struct {
+	ID                      types.String         `tfsdk:"id"`
+	ElasticsearchConnection types.List           `tfsdk:"elasticsearch_connection"`
+	Name                    types.String         `tfsdk:"name"`
+	Metadata                jsontypes.Normalized `tfsdk:"metadata"`
+	Hot                     types.Object         `tfsdk:"hot"`
+	Warm                    types.Object         `tfsdk:"warm"`
+	Cold                    types.Object         `tfsdk:"cold"`
+	Frozen                  types.Object         `tfsdk:"frozen"`
+	Delete                  types.Object         `tfsdk:"delete"`
+	ModifiedDate            types.String         `tfsdk:"modified_date"`
+}
+
+// phaseObject returns the model field holding the named ILM phase. Callers
+// are expected to pass one of supportedIlmPhases; any other name falls back
+// to a null object typed as the hot phase.
+// NOTE(review): the fallback is hot-typed regardless of the requested name —
+// acceptable only while all call sites iterate supportedIlmPhases; confirm if
+// new callers are added.
+func (m *tfModel) phaseObject(name string) types.Object {
+	switch name {
+	case ilmPhaseHot:
+		return m.Hot
+	case ilmPhaseWarm:
+		return m.Warm
+	case ilmPhaseCold:
+		return m.Cold
+	case ilmPhaseFrozen:
+		return m.Frozen
+	case ilmPhaseDelete:
+		return m.Delete
+	default:
+		return phaseObjectNull(ilmPhaseHot)
+	}
+}
diff --git a/internal/elasticsearch/index/ilm/policy.go b/internal/elasticsearch/index/ilm/policy.go
new file mode 100644
index 000000000..122cadb5d
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/policy.go
@@ -0,0 +1,140 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch"
+ "github.com/elastic/terraform-provider-elasticstack/internal/diagutil"
+ "github.com/elastic/terraform-provider-elasticstack/internal/models"
+ "github.com/hashicorp/go-version"
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+// policyFromModel converts the framework model into the API-side
+// models.Policy. Each configured phase object is flattened into a generic map
+// and the whole set is handed to expandIlmPolicy, which validates settings
+// against serverVersion.
+func policyFromModel(ctx context.Context, m *tfModel, serverVersion *version.Version) (*models.Policy, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	meta := ""
+	if !m.Metadata.IsNull() && !m.Metadata.IsUnknown() {
+		meta = m.Metadata.ValueString()
+	}
+	phases := make(map[string]map[string]any)
+	for _, ph := range supportedIlmPhases {
+		po := m.phaseObject(ph)
+		pm, d := phaseObjectToExpandMap(ctx, po)
+		diags.Append(d...)
+		if diags.HasError() {
+			return nil, diags
+		}
+		// Empty map means the phase was not configured; leave it out so the
+		// API request only carries declared phases.
+		if len(pm) > 0 {
+			phases[ph] = pm
+		}
+	}
+	return expandIlmPolicy(m.Name.ValueString(), meta, phases, serverVersion)
+}
+
+// readPolicyIntoModel maps an API policy definition onto a fresh tfModel,
+// carrying over ID and connection settings from prior state. Phases reported
+// by the server are flattened (with prior phase objects consulted for
+// declared toggles); phases absent from the response become null objects so
+// state matches the server.
+func readPolicyIntoModel(ctx context.Context, ilmDef *models.PolicyDefinition, prior *tfModel, policyName string) (*tfModel, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	out := &tfModel{
+		ID:                      prior.ID,
+		ElasticsearchConnection: prior.ElasticsearchConnection,
+		Name:                    types.StringValue(policyName),
+		ModifiedDate:            types.StringValue(ilmDef.Modified),
+	}
+
+	if ilmDef.Policy.Metadata != nil {
+		b, err := json.Marshal(ilmDef.Policy.Metadata)
+		if err != nil {
+			diags.AddError("Failed to marshal metadata", err.Error())
+			return nil, diags
+		}
+		out.Metadata = jsontypes.NewNormalizedValue(string(b))
+	} else {
+		// No metadata on the server: preserve whatever prior state holds
+		// (typically null) so an unset attribute stays unset.
+		out.Metadata = prior.Metadata
+	}
+
+	for _, ph := range supportedIlmPhases {
+		if v, ok := ilmDef.Policy.Phases[ph]; ok {
+			obj, d := flattenPhase(ctx, ph, v, prior.phaseObject(ph))
+			diags.Append(d...)
+			if diags.HasError() {
+				return nil, diags
+			}
+			switch ph {
+			case ilmPhaseHot:
+				out.Hot = obj
+			case ilmPhaseWarm:
+				out.Warm = obj
+			case ilmPhaseCold:
+				out.Cold = obj
+			case ilmPhaseFrozen:
+				out.Frozen = obj
+			case ilmPhaseDelete:
+				out.Delete = obj
+			}
+		} else {
+			// Phase not present in the API response: record an explicitly
+			// null (typed) object rather than leaving the zero value.
+			nullObj := phaseObjectNull(ph)
+			switch ph {
+			case ilmPhaseHot:
+				out.Hot = nullObj
+			case ilmPhaseWarm:
+				out.Warm = nullObj
+			case ilmPhaseCold:
+				out.Cold = nullObj
+			case ilmPhaseFrozen:
+				out.Frozen = nullObj
+			case ilmPhaseDelete:
+				out.Delete = nullObj
+			}
+		}
+	}
+
+	return out, diags
+}
+
+// readFull fetches the named ILM policy and maps it onto a model, reusing
+// prior state for fields the API does not return. A (nil, no-error) result
+// signals the policy no longer exists and the caller should remove the
+// resource from state.
+func readFull(ctx context.Context, apiClient *clients.APIClient, policyName string, prior *tfModel) (*tfModel, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	ilmDef, fwDiags := elasticsearch.GetIlm(ctx, apiClient, policyName)
+	diags.Append(fwDiags...)
+	if diags.HasError() {
+		return nil, diags
+	}
+	if ilmDef == nil {
+		tflog.Warn(ctx, fmt.Sprintf(`ILM policy "%s" not found, removing from state`, policyName))
+		return nil, diags
+	}
+	out, d := readPolicyIntoModel(ctx, ilmDef, prior, policyName)
+	diags.Append(d...)
+	return out, diags
+}
+
+// serverVersionFW returns the connected Elasticsearch server version,
+// converting SDK-style diagnostics from the client into framework
+// diagnostics.
+func serverVersionFW(ctx context.Context, c *clients.APIClient) (*version.Version, diag.Diagnostics) {
+	var diags diag.Diagnostics
+	sv, sdkd := c.ServerVersion(ctx)
+	diags.Append(diagutil.FrameworkDiagsFromSDK(sdkd)...)
+	if diags.HasError() {
+		return nil, diags
+	}
+	return sv, diags
+}
diff --git a/internal/elasticsearch/index/ilm/read.go b/internal/elasticsearch/index/ilm/read.go
new file mode 100644
index 000000000..6b9a04d50
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/read.go
@@ -0,0 +1,58 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+// Read refreshes state for the ILM policy resource: it parses the policy name
+// out of the composite ID, resolves the API client (honoring a per-resource
+// elasticsearch_connection override), and re-reads the policy. A missing
+// policy removes the resource from state.
+func (r *Resource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	var prior tfModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &prior)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// ID has the form "<cluster_uuid>/<policy_name>"; only the resource part
+	// is needed for the API call.
+	compID, diags := clients.CompositeIDFromStrFw(prior.ID.ValueString())
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	policyName := compID.ResourceID
+
+	client, diags := clients.MaybeNewAPIClientFromFrameworkResource(ctx, prior.ElasticsearchConnection, r.client)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	out, diags := readFull(ctx, client, policyName, &prior)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	// nil model with no error means the policy was deleted out-of-band.
+	if out == nil {
+		resp.State.RemoveResource(ctx)
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, out)...)
+}
diff --git a/internal/elasticsearch/index/ilm/resource.go b/internal/elasticsearch/index/ilm/resource.go
new file mode 100644
index 000000000..9c7188bea
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/resource.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+var (
+ _ resource.Resource = &Resource{}
+ _ resource.ResourceWithConfigure = &Resource{}
+ _ resource.ResourceWithImportState = &Resource{}
+ _ resource.ResourceWithValidateConfig = &Resource{}
+ _ resource.ResourceWithUpgradeState = &Resource{}
+)
+
+// NewResource returns a new instance of the ILM policy resource.
+func NewResource() resource.Resource {
+	return &Resource{}
+}
+
+// Resource implements the elasticstack_elasticsearch_index_lifecycle
+// plugin-framework resource. client is populated by Configure.
+type Resource struct {
+	client *clients.APIClient
+}
+
+// Configure stores the provider-level API client. ProviderData may be nil on
+// early framework calls; ConvertProviderData handles that and reports any
+// problems via diagnostics.
+func (r *Resource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	client, diags := clients.ConvertProviderData(req.ProviderData)
+	resp.Diagnostics.Append(diags...)
+	r.client = client
+}
+
+// Metadata sets the resource type name
+// ("<provider>_elasticsearch_index_lifecycle").
+func (r *Resource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_elasticsearch_index_lifecycle"
+}
+
+// ImportState imports by passing the user-supplied ID straight into the "id"
+// attribute; Read then parses it as a composite ID.
+func (r *Resource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
+
+// UpgradeState upgrades schema version 0 (the legacy SDKv2 list-block state)
+// to the current single-nested-block layout via migrateILMStateV0ToV1.
+func (r *Resource) UpgradeState(context.Context) map[int64]resource.StateUpgrader {
+	return map[int64]resource.StateUpgrader{
+		0: {
+			StateUpgrader: migrateILMStateV0ToV1,
+		},
+	}
+}
diff --git a/internal/elasticsearch/index/ilm/schema.go b/internal/elasticsearch/index/ilm/schema.go
new file mode 100644
index 000000000..1a27dc2c6
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/schema.go
@@ -0,0 +1,171 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+ _ "embed"
+
+ esindex "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index"
+ providerschema "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+)
+
+//go:embed descriptions/ilm_resource.md
+var resourceMarkdownDescription string
+
+// Schema defines the resource schema: top-level attributes (id, name,
+// metadata, modified_date) plus one SingleNestedBlock per ILM phase and the
+// deprecated elasticsearch_connection block. The long-form description is
+// embedded from descriptions/ilm_resource.md.
+func (r *Resource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Version:             currentSchemaVersion,
+		MarkdownDescription: resourceMarkdownDescription,
+		Blocks: map[string]schema.Block{
+			"elasticsearch_connection": providerschema.GetEsFWConnectionBlock(false),
+			ilmPhaseHot:                phaseHotBlock(),
+			ilmPhaseWarm:               phaseWarmBlock(),
+			ilmPhaseCold:               phaseColdBlock(),
+			ilmPhaseFrozen:             phaseFrozenBlock(),
+			ilmPhaseDelete:             phaseDeleteBlock(),
+		},
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Description: "Internal identifier of the resource",
+				Computed:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+			},
+			"name": schema.StringAttribute{
+				Description: "Identifier for the policy.",
+				Required:    true,
+				// Policy name is part of the identity; renaming forces a
+				// destroy/create.
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+			},
+			"metadata": schema.StringAttribute{
+				Description: "Optional user metadata about the ilm policy. Must be valid JSON document.",
+				Optional:    true,
+				CustomType:  jsontypes.NormalizedType{},
+				Validators:  []validator.String{esindex.StringIsJSONObject{}},
+			},
+			"modified_date": schema.StringAttribute{
+				Description: "The DateTime of the last modification.",
+				Computed:    true,
+			},
+		},
+	}
+}
+
+// minAgeAttribute builds the shared "min_age" attribute used by every phase
+// block: optional, computed, defaulting to "0ms", and carried forward from
+// state when unknown in the plan.
+func minAgeAttribute() schema.StringAttribute {
+	return schema.StringAttribute{
+		Description: "ILM moves indices through the lifecycle according to their age. To control the timing of these transitions, you set a minimum age for each phase.",
+		Optional:    true,
+		Computed:    true,
+		Default:     stringdefault.StaticString("0ms"),
+		PlanModifiers: []planmodifier.String{
+			stringplanmodifier.UseStateForUnknown(),
+		},
+	}
+}
+
+// phaseHotBlock defines the "hot" phase block and the actions valid in it.
+func phaseHotBlock() schema.SingleNestedBlock {
+	return schema.SingleNestedBlock{
+		MarkdownDescription: "The index is actively being updated and queried.",
+		Attributes: map[string]schema.Attribute{
+			"min_age": minAgeAttribute(),
+		},
+		Blocks: map[string]schema.Block{
+			"set_priority":        blockSetPriority(),
+			"unfollow":            blockUnfollow(),
+			"rollover":            blockRollover(),
+			"readonly":            blockReadonly(),
+			"shrink":              blockShrink(),
+			"forcemerge":          blockForcemerge(),
+			"searchable_snapshot": blockSearchableSnapshot(),
+			"downsample":          blockDownsample(),
+		},
+	}
+}
+
+// phaseWarmBlock defines the "warm" phase block and the actions valid in it.
+func phaseWarmBlock() schema.SingleNestedBlock {
+	return schema.SingleNestedBlock{
+		MarkdownDescription: "The index is no longer being updated but is still being queried.",
+		Attributes: map[string]schema.Attribute{
+			"min_age": minAgeAttribute(),
+		},
+		Blocks: map[string]schema.Block{
+			"set_priority": blockSetPriority(),
+			"unfollow":     blockUnfollow(),
+			"readonly":     blockReadonly(),
+			"allocate":     blockAllocate(),
+			"migrate":      blockMigrate(),
+			"shrink":       blockShrink(),
+			"forcemerge":   blockForcemerge(),
+			"downsample":   blockDownsample(),
+		},
+	}
+}
+
+// phaseColdBlock defines the "cold" phase block and the actions valid in it.
+func phaseColdBlock() schema.SingleNestedBlock {
+	return schema.SingleNestedBlock{
+		MarkdownDescription: "The index is no longer being updated and is queried infrequently. The information still needs to be searchable, but it's okay if those queries are slower.",
+		Attributes: map[string]schema.Attribute{
+			"min_age": minAgeAttribute(),
+		},
+		Blocks: map[string]schema.Block{
+			"set_priority":        blockSetPriority(),
+			"unfollow":            blockUnfollow(),
+			"readonly":            blockReadonly(),
+			"searchable_snapshot": blockSearchableSnapshot(),
+			"allocate":            blockAllocate(),
+			"migrate":             blockMigrate(),
+			"freeze":              blockFreeze(),
+			"downsample":          blockDownsample(),
+		},
+	}
+}
+
+// phaseFrozenBlock defines the "frozen" phase block; only the
+// searchable_snapshot action is valid here.
+func phaseFrozenBlock() schema.SingleNestedBlock {
+	return schema.SingleNestedBlock{
+		MarkdownDescription: "The index is no longer being updated and is queried rarely. The information still needs to be searchable, but it's okay if those queries are extremely slow.",
+		Attributes: map[string]schema.Attribute{
+			"min_age": minAgeAttribute(),
+		},
+		Blocks: map[string]schema.Block{
+			"searchable_snapshot": blockSearchableSnapshot(),
+		},
+	}
+}
+
+// phaseDeleteBlock defines the "delete" phase block. Note the nested action
+// block is also named "delete" (ilmPhaseDelete), mirroring the ILM API shape.
+func phaseDeleteBlock() schema.SingleNestedBlock {
+	return schema.SingleNestedBlock{
+		MarkdownDescription: "The index is no longer needed and can safely be removed.",
+		Attributes: map[string]schema.Attribute{
+			"min_age": minAgeAttribute(),
+		},
+		Blocks: map[string]schema.Block{
+			"wait_for_snapshot": blockWaitForSnapshot(),
+			ilmPhaseDelete:      blockDeleteAction(),
+		},
+	}
+}
diff --git a/internal/elasticsearch/index/ilm/schema_actions.go b/internal/elasticsearch/index/ilm/schema_actions.go
new file mode 100644
index 000000000..4a85eb2f0
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/schema_actions.go
@@ -0,0 +1,298 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ _ "embed"
+
+ esindex "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index"
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+)
+
+//go:embed descriptions/ilm_set_priority_action.md
+var setPriorityActionDescription string
+
+// singleNestedBlock is a small helper that wraps a NestedBlockObject into a
+// SingleNestedBlock with the given markdown description and optional
+// object-level validators.
+func singleNestedBlock(desc string, nested schema.NestedBlockObject, validators ...validator.Object) schema.SingleNestedBlock {
+	b := schema.SingleNestedBlock{
+		MarkdownDescription: desc,
+		Attributes:          nested.Attributes,
+		Blocks:              nested.Blocks,
+	}
+	if len(validators) > 0 {
+		b.Validators = validators
+	}
+	return b
+}
+
+// blockAllocate defines the "allocate" action block: replica count, per-node
+// shard cap, and the include/exclude/require routing filters (stored as
+// normalized JSON strings).
+func blockAllocate() schema.SingleNestedBlock {
+	return singleNestedBlock("Updates the index settings to change which nodes are allowed to host the index shards and change the number of replicas.", schema.NestedBlockObject{
+		Attributes: map[string]schema.Attribute{
+			"number_of_replicas": schema.Int64Attribute{
+				Description: "Number of replicas to assign to the index. Default: `0`",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(0),
+			},
+			"total_shards_per_node": schema.Int64Attribute{
+				Description: "The maximum number of shards for the index on a single Elasticsearch node. Defaults to `-1` (unlimited). Supported from Elasticsearch version **7.16**",
+				Optional:    true,
+				Computed:    true,
+				Default:     int64default.StaticInt64(-1),
+			},
+			"include": schema.StringAttribute{
+				Description: "Assigns an index to nodes that have at least one of the specified custom attributes. Must be valid JSON document.",
+				Optional:    true,
+				CustomType:  jsontypes.NormalizedType{},
+				Validators:  []validator.String{esindex.StringIsJSONObject{}},
+			},
+			"exclude": schema.StringAttribute{
+				Description: "Assigns an index to nodes that have none of the specified custom attributes. Must be valid JSON document.",
+				Optional:    true,
+				CustomType:  jsontypes.NormalizedType{},
+				Validators:  []validator.String{esindex.StringIsJSONObject{}},
+			},
+			"require": schema.StringAttribute{
+				Description: "Assigns an index to nodes that have all of the specified custom attributes. Must be valid JSON document.",
+				Optional:    true,
+				CustomType:  jsontypes.NormalizedType{},
+				Validators:  []validator.String{esindex.StringIsJSONObject{}},
+			},
+		},
+	})
+}
+
+// blockDeleteAction defines the "delete" action block within the delete
+// phase; delete_searchable_snapshot defaults to true.
+func blockDeleteAction() schema.SingleNestedBlock {
+	return singleNestedBlock("Permanently removes the index.", schema.NestedBlockObject{
+		Attributes: map[string]schema.Attribute{
+			"delete_searchable_snapshot": schema.BoolAttribute{
+				Description: "Deletes the searchable snapshot created in a previous phase.",
+				Optional:    true,
+				Computed:    true,
+				Default:     booldefault.StaticBool(true),
+			},
+		},
+	})
+}
+
+// blockForcemerge defines the "forcemerge" action block. The object-level
+// AlsoRequires validator makes max_num_segments mandatory whenever the block
+// itself is present (it cannot be schema-Required in a SingleNestedBlock).
+func blockForcemerge() schema.SingleNestedBlock {
+	return singleNestedBlock("Force merges the index into the specified maximum number of segments. This action makes the index read-only.", schema.NestedBlockObject{
+		Attributes: map[string]schema.Attribute{
+			"max_num_segments": schema.Int64Attribute{
+				Description: "Number of segments to merge to. To fully merge the index, set to 1. Required when the `forcemerge` action is configured.",
+				Optional:    true,
+				Validators:  []validator.Int64{int64validator.AtLeast(1)},
+			},
+			"index_codec": schema.StringAttribute{
+				Description: "Codec used to compress the document store.",
+				Optional:    true,
+			},
+		},
+	}, objectvalidator.AlsoRequires(path.MatchRelative().AtName("max_num_segments")))
+}
+
+// blockFreeze defines the "freeze" toggle action block (enabled defaults to
+// true when the block is declared).
+func blockFreeze() schema.SingleNestedBlock {
+	return singleNestedBlock("Freeze the index to minimize its memory footprint.", schema.NestedBlockObject{
+		Attributes: map[string]schema.Attribute{
+			"enabled": schema.BoolAttribute{
+				Description: "Controls whether ILM freezes the index.",
+				Optional:    true,
+				Computed:    true,
+				Default:     booldefault.StaticBool(true),
+			},
+		},
+	})
+}
+
+// blockMigrate defines the "migrate" action block controlling automatic data
+// tier migration for the phase.
+func blockMigrate() schema.SingleNestedBlock {
+	return singleNestedBlock(
+		`Moves the index to the data tier that corresponds to the current phase by updating `+
+			`the "index.routing.allocation.include._tier_preference" index setting.`,
+		schema.NestedBlockObject{
+			Attributes: map[string]schema.Attribute{
+				"enabled": schema.BoolAttribute{
+					Description: "Controls whether ILM automatically migrates the index during this phase.",
+					Optional:    true,
+					Computed:    true,
+					Default:     booldefault.StaticBool(true),
+				},
+			},
+		},
+	)
+}
+
+// blockReadonly defines the "readonly" toggle action block (enabled defaults
+// to true when the block is declared).
+func blockReadonly() schema.SingleNestedBlock {
+	return singleNestedBlock("Makes the index read-only.", schema.NestedBlockObject{
+		Attributes: map[string]schema.Attribute{
+			"enabled": schema.BoolAttribute{
+				Description: "Controls whether ILM makes the index read-only.",
+				Optional:    true,
+				Computed:    true,
+				Default:     booldefault.StaticBool(true),
+			},
+		},
+	})
+}
+
+// blockRollover defines the "rollover" action block with its max_* trigger
+// conditions and min_* guard conditions; version constraints for individual
+// settings are documented per attribute and enforced at expand time.
+func blockRollover() schema.SingleNestedBlock {
+	return singleNestedBlock("Rolls over a target to a new index when the existing index meets one or more of the rollover conditions.", schema.NestedBlockObject{
+		Attributes: map[string]schema.Attribute{
+			"max_age": schema.StringAttribute{
+				Description: "Triggers rollover after the maximum elapsed time from index creation is reached.",
+				Optional:    true,
+			},
+			"max_docs": schema.Int64Attribute{
+				Description: "Triggers rollover after the specified maximum number of documents is reached.",
+				Optional:    true,
+			},
+			"max_size": schema.StringAttribute{
+				Description: "Triggers rollover when the index reaches a certain size.",
+				Optional:    true,
+			},
+			"max_primary_shard_docs": schema.Int64Attribute{
+				Description: "Triggers rollover when the largest primary shard in the index reaches a certain number of documents. Supported from Elasticsearch version **8.2**",
+				Optional:    true,
+			},
+			"max_primary_shard_size": schema.StringAttribute{
+				Description: "Triggers rollover when the largest primary shard in the index reaches a certain size.",
+				Optional:    true,
+			},
+			"min_age": schema.StringAttribute{
+				Description: "Prevents rollover until after the minimum elapsed time from index creation is reached. Supported from Elasticsearch version **8.4**",
+				Optional:    true,
+			},
+			"min_docs": schema.Int64Attribute{
+				Description: "Prevents rollover until after the specified minimum number of documents is reached. Supported from Elasticsearch version **8.4**",
+				Optional:    true,
+			},
+			"min_size": schema.StringAttribute{
+				Description: "Prevents rollover until the index reaches a certain size.",
+				Optional:    true,
+			},
+			"min_primary_shard_docs": schema.Int64Attribute{
+				Description: "Prevents rollover until the largest primary shard in the index reaches a certain number of documents. Supported from Elasticsearch version **8.4**",
+				Optional:    true,
+			},
+			"min_primary_shard_size": schema.StringAttribute{
+				Description: "Prevents rollover until the largest primary shard in the index reaches a certain size. Supported from Elasticsearch version **8.4**",
+				Optional:    true,
+			},
+		},
+	})
+}
+
+// blockSearchableSnapshot returns the schema block for the ILM
+// `searchable_snapshot` action. `snapshot_repository` is declared Optional
+// so the object type stays uniform, but the AlsoRequires validator makes
+// it mandatory whenever the block is configured.
+func blockSearchableSnapshot() schema.SingleNestedBlock {
+ return singleNestedBlock("Takes a snapshot of the managed index in the configured repository and mounts it as a searchable snapshot.", schema.NestedBlockObject{
+ Attributes: map[string]schema.Attribute{
+ "snapshot_repository": schema.StringAttribute{
+ Description: "Repository used to store the snapshot. Required when the `searchable_snapshot` action is configured.",
+ Optional: true,
+ },
+ "force_merge_index": schema.BoolAttribute{
+ Description: "Force merges the managed index to one segment.",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(true),
+ },
+ },
+ }, objectvalidator.AlsoRequires(path.MatchRelative().AtName("snapshot_repository")))
+}
+
+// blockSetPriority returns the schema block for the ILM `set_priority`
+// action. `priority` must be >= 0 and, via the AlsoRequires validator,
+// must be set whenever the block is configured.
+func blockSetPriority() schema.SingleNestedBlock {
+ return singleNestedBlock(setPriorityActionDescription, schema.NestedBlockObject{
+ Attributes: map[string]schema.Attribute{
+ "priority": schema.Int64Attribute{
+ Description: "The priority for the index. Must be 0 or greater. Required when the `set_priority` action is configured.",
+ Optional: true,
+ Validators: []validator.Int64{int64validator.AtLeast(0)},
+ },
+ },
+ }, objectvalidator.AlsoRequires(path.MatchRelative().AtName("priority")))
+}
+
+// blockShrink returns the schema block for the ILM `shrink` action.
+// NOTE(review): Elasticsearch treats `number_of_shards` and
+// `max_primary_shard_size` as mutually exclusive, but no validator here
+// enforces that — confirm whether the server-side error is acceptable.
+func blockShrink() schema.SingleNestedBlock {
+ return singleNestedBlock("Sets a source index to read-only and shrinks it into a new index with fewer primary shards.", schema.NestedBlockObject{
+ Attributes: map[string]schema.Attribute{
+ "number_of_shards": schema.Int64Attribute{
+ Description: "Number of shards to shrink to.",
+ Optional: true,
+ },
+ "max_primary_shard_size": schema.StringAttribute{
+ Description: "The max primary shard size for the target index.",
+ Optional: true,
+ },
+ "allow_write_after_shrink": schema.BoolAttribute{
+ Description: "If true, the shrunken index is made writable by removing the write block.",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(false),
+ },
+ },
+ })
+}
+
+// blockUnfollow returns the schema block for the ILM `unfollow` action.
+// `enabled` defaults to true so an empty `unfollow {}` block activates it.
+func blockUnfollow() schema.SingleNestedBlock {
+ return singleNestedBlock("Convert a follower index to a regular index. Performed automatically before a rollover, shrink, or searchable snapshot action.", schema.NestedBlockObject{
+ Attributes: map[string]schema.Attribute{
+ "enabled": schema.BoolAttribute{
+ Description: "Controls whether ILM makes the follower index a regular one.",
+ Optional: true,
+ Computed: true,
+ Default: booldefault.StaticBool(true),
+ },
+ },
+ })
+}
+
+// blockWaitForSnapshot returns the schema block for the ILM
+// `wait_for_snapshot` action. `policy` is Optional at the type level but
+// required via AlsoRequires whenever the block is present.
+func blockWaitForSnapshot() schema.SingleNestedBlock {
+ return singleNestedBlock("Waits for the specified SLM policy to be executed before removing the index. This ensures that a snapshot of the deleted index is available.", schema.NestedBlockObject{
+ Attributes: map[string]schema.Attribute{
+ "policy": schema.StringAttribute{
+ Description: "Name of the SLM policy that the delete action should wait for. Required when the `wait_for_snapshot` action is configured.",
+ Optional: true,
+ },
+ },
+ }, objectvalidator.AlsoRequires(path.MatchRelative().AtName("policy")))
+}
+
+// blockDownsample returns the schema block for the ILM `downsample`
+// action. `fixed_interval` is required whenever the block is configured
+// (enforced by AlsoRequires); `wait_timeout` is Computed so a server-side
+// default can be reflected back into state.
+func blockDownsample() schema.SingleNestedBlock {
+ return singleNestedBlock(
+ "Roll up documents within a fixed interval to a single summary document. "+
+ "Reduces the index footprint by storing time series data at reduced granularity.",
+ schema.NestedBlockObject{
+ Attributes: map[string]schema.Attribute{
+ "fixed_interval": schema.StringAttribute{
+ Description: "Downsampling interval. Required when the `downsample` action is configured.",
+ Optional: true,
+ },
+ "wait_timeout": schema.StringAttribute{
+ Description: "Maximum time to wait for the downsample operation to complete before timing out.",
+ Optional: true,
+ Computed: true,
+ },
+ },
+ },
+ objectvalidator.AlsoRequires(path.MatchRelative().AtName("fixed_interval")),
+ )
+}
diff --git a/internal/elasticsearch/index/ilm/state_upgrade.go b/internal/elasticsearch/index/ilm/state_upgrade.go
new file mode 100644
index 000000000..3b4e579b9
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/state_upgrade.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-go/tfprotov6"
+)
+
+// ilmPhaseBlockKeys lists the top-level ILM phase block keys, each of
+// which schema version 0 stored as a singleton list.
+var ilmPhaseBlockKeys = [...]string{ilmPhaseHot, ilmPhaseWarm, ilmPhaseCold, ilmPhaseFrozen, ilmPhaseDelete}
+
+// migrateILMStateV0ToV1 unwraps list-wrapped nested blocks from schema
+// version 0 into single objects. It operates on the raw JSON state so it
+// does not depend on either schema's framework types.
+func migrateILMStateV0ToV1(_ context.Context, req resource.UpgradeStateRequest, resp *resource.UpgradeStateResponse) {
+ if req.RawState == nil || req.RawState.JSON == nil {
+ resp.Diagnostics.AddError("Invalid raw state", "Raw state or JSON is nil")
+ return
+ }
+
+ var stateMap map[string]any
+ err := json.Unmarshal(req.RawState.JSON, &stateMap)
+ if err != nil {
+ resp.Diagnostics.AddError("State upgrade error", "Could not unmarshal prior state: "+err.Error())
+ return
+ }
+
+ // Unwrap each phase from its v0 singleton-list encoding. An empty list
+ // means the phase was absent, so its key is dropped entirely.
+ // `elasticsearch_connection` is intentionally not touched: it stays a
+ // list-typed block in schema version 1.
+ for _, pk := range ilmPhaseBlockKeys {
+ if raw, ok := stateMap[pk]; ok {
+ u := unwrapSingletonListToMap(raw)
+ if u == nil {
+ delete(stateMap, pk)
+ } else {
+ stateMap[pk] = u
+ }
+ }
+ }
+
+ stateJSON, err := json.Marshal(stateMap)
+ if err != nil {
+ resp.Diagnostics.AddError("State upgrade error", "Could not marshal new state: "+err.Error())
+ return
+ }
+ resp.DynamicValue = &tfprotov6.DynamicValue{
+ JSON: stateJSON,
+ }
+}
+
+// unwrapSingletonListToMap converts a v0 singleton-list phase value into
+// its inner object map. Non-list values are returned unchanged, an empty
+// list yields nil (the caller removes the key in that case), and a list
+// whose first element is not a map is returned as-is. Action lists nested
+// inside the phase object are also unwrapped.
+func unwrapSingletonListToMap(v any) any {
+ list, ok := v.([]any)
+ if !ok {
+ return v
+ }
+ if len(list) == 0 {
+ return nil
+ }
+ first := list[0]
+ phaseObj, ok := first.(map[string]any)
+ if !ok {
+ return v
+ }
+ unwrapPhaseActionLists(phaseObj)
+ return phaseObj
+}
+
+// unwrapPhaseActionLists replaces each action's v0 singleton-list value in
+// the phase map with its inner object. `min_age` is a scalar attribute and
+// is skipped; empty action lists are removed. Deleting keys while ranging
+// over a map is well-defined in Go.
+func unwrapPhaseActionLists(m map[string]any) {
+ for k, v := range m {
+ if k == "min_age" {
+ continue
+ }
+ list, ok := v.([]any)
+ if !ok {
+ continue
+ }
+ if len(list) == 0 {
+ delete(m, k)
+ continue
+ }
+ if inner, ok := list[0].(map[string]any); ok {
+ m[k] = inner
+ }
+ }
+}
diff --git a/internal/elasticsearch/index/ilm/state_upgrade_test.go b/internal/elasticsearch/index/ilm/state_upgrade_test.go
new file mode 100644
index 000000000..0f5876f69
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/state_upgrade_test.go
@@ -0,0 +1,100 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-go/tfprotov6"
+ "github.com/stretchr/testify/require"
+)
+
+// TestILMResourceUpgradeState verifies the v0 -> v1 state upgrade: phase
+// and action singleton lists become single objects while the
+// elasticsearch_connection block remains a list.
+func TestILMResourceUpgradeState(t *testing.T) {
+ t.Parallel()
+
+ // Raw state shaped like schema version 0: every phase and action is a
+ // one-element list; numbers appear as float64, matching JSON decoding.
+ raw := map[string]any{
+ "id": "cluster-uuid/policy-x",
+ "name": "policy-x",
+ "hot": []any{
+ map[string]any{
+ "min_age": "1h",
+ "set_priority": []any{
+ map[string]any{"priority": float64(10)},
+ },
+ "rollover": []any{
+ map[string]any{"max_age": "1d"},
+ },
+ "readonly": []any{
+ map[string]any{"enabled": true},
+ },
+ },
+ },
+ "delete": []any{
+ map[string]any{
+ "min_age": "0ms",
+ "delete": []any{
+ map[string]any{"delete_searchable_snapshot": true},
+ },
+ },
+ },
+ "elasticsearch_connection": []any{
+ map[string]any{"username": "u"},
+ },
+ }
+ rawJSON, err := json.Marshal(raw)
+ require.NoError(t, err)
+
+ // The upgrader registered for prior schema version 0 performs the migration.
+ r := &Resource{}
+ upgraders := r.UpgradeState(context.Background())
+ up, ok := upgraders[0]
+ require.True(t, ok)
+
+ req := resource.UpgradeStateRequest{
+ RawState: &tfprotov6.RawState{JSON: rawJSON},
+ }
+ resp := &resource.UpgradeStateResponse{}
+ up.StateUpgrader(context.Background(), req, resp)
+ require.False(t, resp.Diagnostics.HasError(), "%s", resp.Diagnostics)
+
+ var got map[string]any
+ require.NoError(t, json.Unmarshal(resp.DynamicValue.JSON, &got))
+
+ // Phases and their actions must now be plain objects, not lists.
+ hot, ok := got["hot"].(map[string]any)
+ require.True(t, ok)
+ require.Equal(t, "1h", hot["min_age"])
+ sp, ok := hot["set_priority"].(map[string]any)
+ require.True(t, ok)
+ priority, ok := sp["priority"].(float64)
+ require.True(t, ok)
+ require.InEpsilon(t, 10.0, priority, 0.0001)
+ _, listSP := hot["set_priority"].([]any)
+ require.False(t, listSP)
+
+ del, ok := got["delete"].(map[string]any)
+ require.True(t, ok)
+ innerDel, ok := del["delete"].(map[string]any)
+ require.True(t, ok)
+ require.Equal(t, true, innerDel["delete_searchable_snapshot"])
+
+ // The connection block keeps its list encoding across the upgrade.
+ conn, ok := got["elasticsearch_connection"].([]any)
+ require.True(t, ok)
+ require.Len(t, conn, 1)
+}
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILM/create/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/create/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILM/create/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/create/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILM/downsample/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/downsample/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILM/downsample/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/downsample/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILM/downsample_no_timeout/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/downsample_no_timeout/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILM/downsample_no_timeout/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/downsample_no_timeout/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILM/remove_actions/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/remove_actions/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILM/remove_actions/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/remove_actions/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILM/total_shards_per_node/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/total_shards_per_node/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILM/total_shards_per_node/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILM/total_shards_per_node/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILMColdPhase/create/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMColdPhase/create/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILMColdPhase/create/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILMColdPhase/create/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILMForcemerge/create/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMForcemerge/create/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILMForcemerge/create/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILMForcemerge/create/ilm.tf
diff --git a/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMFromSDK/create/resource.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMFromSDK/create/resource.tf
new file mode 100644
index 000000000..228c9905c
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMFromSDK/create/resource.tf
@@ -0,0 +1,29 @@
+# Fixture for TestAccResourceILMFromSDK/create: a policy with a hot phase
+# (set_priority, rollover, readonly) and a delete phase, written in the
+# single-nested block syntax.
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+variable "policy_name" {
+ type = string
+}
+
+resource "elasticstack_elasticsearch_index_lifecycle" "test" {
+ name = var.policy_name
+
+ hot {
+ min_age = "1h"
+
+ set_priority {
+ priority = 10
+ }
+
+ rollover {
+ max_age = "1d"
+ }
+
+ readonly {}
+ }
+
+ delete {
+ delete {}
+ }
+}
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILMMetadata/create/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMMetadata/create/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILMMetadata/create/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILMMetadata/create/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILMMetadata/update/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMMetadata/update/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILMMetadata/update/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILMMetadata/update/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILMRolloverConditions/max_primary_shard_docs/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMRolloverConditions/max_primary_shard_docs/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILMRolloverConditions/max_primary_shard_docs/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILMRolloverConditions/max_primary_shard_docs/ilm.tf
diff --git a/internal/elasticsearch/index/testdata/TestAccResourceILMRolloverConditions/rollover_conditions/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILMRolloverConditions/rollover_conditions/ilm.tf
similarity index 100%
rename from internal/elasticsearch/index/testdata/TestAccResourceILMRolloverConditions/rollover_conditions/ilm.tf
rename to internal/elasticsearch/index/ilm/testdata/TestAccResourceILMRolloverConditions/rollover_conditions/ilm.tf
diff --git a/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_hotReadonlyDisabled/readonly_disabled/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_hotReadonlyDisabled/readonly_disabled/ilm.tf
new file mode 100644
index 000000000..45596800c
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_hotReadonlyDisabled/readonly_disabled/ilm.tf
@@ -0,0 +1,28 @@
+# Fixture for TestAccResourceILM_hotReadonlyDisabled: the readonly action
+# is declared with enabled = false to exercise the explicit-disable path.
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+variable "policy_name" {
+ type = string
+}
+
+resource "elasticstack_elasticsearch_index_lifecycle" "test_ro" {
+ name = var.policy_name
+
+ hot {
+ min_age = "1h"
+ set_priority {
+ priority = 10
+ }
+ rollover {
+ max_age = "1d"
+ }
+ readonly {
+ enabled = false
+ }
+ }
+
+ delete {
+ delete {}
+ }
+}
diff --git a/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_importBasic/create/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_importBasic/create/ilm.tf
new file mode 100644
index 000000000..228c9905c
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_importBasic/create/ilm.tf
@@ -0,0 +1,29 @@
+# Fixture for TestAccResourceILM_importBasic/create: baseline policy used
+# to verify import round-tripping of the single-nested block schema.
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+variable "policy_name" {
+ type = string
+}
+
+resource "elasticstack_elasticsearch_index_lifecycle" "test" {
+ name = var.policy_name
+
+ hot {
+ min_age = "1h"
+
+ set_priority {
+ priority = 10
+ }
+
+ rollover {
+ max_age = "1d"
+ }
+
+ readonly {}
+ }
+
+ delete {
+ delete {}
+ }
+}
diff --git a/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_shrinkAllowWriteAfterShrink/shrink_write/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_shrinkAllowWriteAfterShrink/shrink_write/ilm.tf
new file mode 100644
index 000000000..30c74f359
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_shrinkAllowWriteAfterShrink/shrink_write/ilm.tf
@@ -0,0 +1,29 @@
+# Fixture for TestAccResourceILM_shrinkAllowWriteAfterShrink: shrink with
+# allow_write_after_shrink = true overriding its false default.
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+variable "policy_name" {
+ type = string
+}
+
+resource "elasticstack_elasticsearch_index_lifecycle" "test_shrink" {
+ name = var.policy_name
+
+ hot {
+ min_age = "1h"
+ set_priority {
+ priority = 10
+ }
+ rollover {
+ max_age = "1d"
+ }
+ shrink {
+ number_of_shards = 1
+ allow_write_after_shrink = true
+ }
+ }
+
+ delete {
+ delete {}
+ }
+}
diff --git a/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_warmMigrateDisabled/migrate_warm/ilm.tf b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_warmMigrateDisabled/migrate_warm/ilm.tf
new file mode 100644
index 000000000..bcc8776b0
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/testdata/TestAccResourceILM_warmMigrateDisabled/migrate_warm/ilm.tf
@@ -0,0 +1,36 @@
+# Fixture for TestAccResourceILM_warmMigrateDisabled: a warm phase whose
+# migrate action is explicitly disabled (enabled = false).
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+variable "policy_name" {
+ type = string
+}
+
+resource "elasticstack_elasticsearch_index_lifecycle" "test_migrate" {
+ name = var.policy_name
+
+ hot {
+ min_age = "1h"
+ set_priority {
+ priority = 10
+ }
+ rollover {
+ max_age = "1d"
+ }
+ readonly {}
+ }
+
+ warm {
+ min_age = "0ms"
+ set_priority {
+ priority = 50
+ }
+ migrate {
+ enabled = false
+ }
+ }
+
+ delete {
+ delete {}
+ }
+}
diff --git a/internal/elasticsearch/index/ilm/update.go b/internal/elasticsearch/index/ilm/update.go
new file mode 100644
index 000000000..0ad0a3f37
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/update.go
@@ -0,0 +1,72 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+// Update serializes the planned policy, PUTs it to Elasticsearch, then
+// re-reads the policy so state reflects server-side values.
+func (r *Resource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ var plan tfModel
+ resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // Resolve the API client, honoring a per-resource connection override.
+ client, diags := clients.MaybeNewAPIClientFromFrameworkResource(ctx, plan.ElasticsearchConnection, r.client)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // The server version gates which policy features may be serialized.
+ sv, diags := serverVersionFW(ctx, client)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ policy, diags := policyFromModel(ctx, &plan, sv)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ policy.Name = plan.Name.ValueString()
+
+ resp.Diagnostics.Append(elasticsearch.PutIlm(ctx, client, policy)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // The plan is passed as the prior model — presumably so readFull can
+ // preserve configuration-dependent values (e.g. declared toggles);
+ // confirm against readFull's contract.
+ prior := plan
+
+ out, diags := readFull(ctx, client, plan.Name.ValueString(), &prior)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ // A nil result means the policy vanished between the PUT and the read.
+ if out == nil {
+ resp.Diagnostics.AddError("ILM policy missing after update", plan.Name.ValueString())
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.Set(ctx, out)...)
+}
diff --git a/internal/elasticsearch/index/ilm/validate.go b/internal/elasticsearch/index/ilm/validate.go
new file mode 100644
index 000000000..b15ed5306
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/validate.go
@@ -0,0 +1,48 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ValidateConfig rejects configurations that declare no lifecycle phase at all.
+func (r *Resource) ValidateConfig(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse) {
+ var c tfModel
+ resp.Diagnostics.Append(req.Config.Get(ctx, &c)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ // Phase values may still be unknown during planning; defer validation
+ // until they are known rather than reporting a spurious error.
+ if c.Hot.IsUnknown() || c.Warm.IsUnknown() || c.Cold.IsUnknown() || c.Frozen.IsUnknown() || c.Delete.IsUnknown() {
+ return
+ }
+ hasPhase := phaseObjectNonEmpty(c.Hot) || phaseObjectNonEmpty(c.Warm) || phaseObjectNonEmpty(c.Cold) ||
+ phaseObjectNonEmpty(c.Frozen) || phaseObjectNonEmpty(c.Delete)
+ if !hasPhase {
+ resp.Diagnostics.AddError(
+ "Missing phase configuration",
+ "At least one of `hot`, `warm`, `cold`, `frozen`, or `delete` blocks must be configured.",
+ )
+ }
+}
+
+// phaseObjectNonEmpty reports whether the phase object is present, i.e.
+// known and non-null. NOTE(review): the name suggests an emptiness check,
+// but only null/unknown-ness is inspected — consider renaming to
+// something like phaseConfigured.
+func phaseObjectNonEmpty(o types.Object) bool {
+ return !o.IsNull() && !o.IsUnknown()
+}
diff --git a/internal/elasticsearch/index/ilm/value_conv.go b/internal/elasticsearch/index/ilm/value_conv.go
new file mode 100644
index 000000000..8e6800e39
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/value_conv.go
@@ -0,0 +1,192 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// phaseMapToObjectValue builds the framework object value for a phase from
+// its raw (JSON-decoded) map, using the phase's declared object type.
+// Failures return an unknown object of the same type so downstream code
+// keeps a consistent shape.
+func phaseMapToObjectValue(ctx context.Context, phaseName string, data map[string]any) (types.Object, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ ot := phaseObjectType(phaseName)
+ attrs, d := phaseDataToObjectAttrs(ctx, ot, data)
+ diags.Append(d...)
+ if diags.HasError() {
+ return types.ObjectUnknown(ot.AttrTypes), diags
+ }
+ obj, d := types.ObjectValue(ot.AttrTypes, attrs)
+ diags.Append(d...)
+ return obj, diags
+}
+
+// phaseDataToObjectAttrs maps raw data onto the attribute set declared by
+// ot. Attributes missing from data (or explicitly nil) become typed
+// nulls; keys present in data but absent from the schema are ignored.
+func phaseDataToObjectAttrs(ctx context.Context, ot types.ObjectType, data map[string]any) (map[string]attr.Value, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ attrs := make(map[string]attr.Value)
+ for k, elemT := range ot.AttrTypes {
+ raw, ok := data[k]
+ if !ok || raw == nil {
+ attrs[k] = nullValueForType(elemT)
+ continue
+ }
+ v, d := anyToAttr(ctx, elemT, raw)
+ diags.Append(d...)
+ if diags.HasError() {
+ return nil, diags
+ }
+ attrs[k] = v
+ }
+ return attrs, diags
+}
+
+// nullValueForType returns the null attr.Value corresponding to t.
+// NOTE(review): any type not handled below silently falls back to
+// StringNull, which would only surface later as a type mismatch —
+// consider failing loudly for unsupported types instead.
+func nullValueForType(t attr.Type) attr.Value {
+ if lt, ok := t.(types.ListType); ok {
+ return types.ListNull(lt.ElemType)
+ }
+ if t.Equal(types.StringType) {
+ return types.StringNull()
+ }
+ if t.Equal(types.Int64Type) {
+ return types.Int64Null()
+ }
+ if t.Equal(types.BoolType) {
+ return types.BoolNull()
+ }
+ if _, ok := t.(jsontypes.NormalizedType); ok {
+ return jsontypes.NewNormalizedNull()
+ }
+ if ot, ok := t.(types.ObjectType); ok {
+ return types.ObjectNull(ot.AttrTypes)
+ }
+ return types.StringNull()
+}
+
+// anyToAttr converts a raw JSON-decoded value into the attr.Value required
+// by the target type t. On a mismatch it records a diagnostic and returns
+// an unknown value of the expected type so callers keep a consistent shape.
+func anyToAttr(ctx context.Context, t attr.Type, raw any) (attr.Value, diag.Diagnostics) {
+ var diags diag.Diagnostics
+ if t.Equal(types.StringType) {
+ s, ok := raw.(string)
+ if !ok {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected string, got %T", raw))
+ return types.StringUnknown(), diags
+ }
+ return types.StringValue(s), diags
+ }
+ if t.Equal(types.Int64Type) {
+ n, ok := coerceInt64(raw)
+ if !ok {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected number, got %T", raw))
+ return types.Int64Unknown(), diags
+ }
+ return types.Int64Value(n), diags
+ }
+ if t.Equal(types.BoolType) {
+ b, ok := raw.(bool)
+ if !ok {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected bool, got %T", raw))
+ return types.BoolUnknown(), diags
+ }
+ return types.BoolValue(b), diags
+ }
+ if ty, ok := t.(types.ListType); ok {
+ // Only singleton lists of objects are supported here; empty or
+ // non-list input maps to a null list.
+ slice, ok := raw.([]any)
+ if !ok || len(slice) == 0 {
+ return types.ListNull(ty.ElemType), diags
+ }
+ elemOT, ok := ty.ElemType.(types.ObjectType)
+ if !ok {
+ diags.AddError("Internal error", "list element must be object")
+ return types.ListUnknown(ty.ElemType), diags
+ }
+ m, ok := slice[0].(map[string]any)
+ if !ok {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected object map, got %T", slice[0]))
+ return types.ListUnknown(ty.ElemType), diags
+ }
+ innerAttrs, d := phaseDataToObjectAttrs(ctx, elemOT, m)
+ diags.Append(d...)
+ if diags.HasError() {
+ return types.ListUnknown(ty.ElemType), diags
+ }
+ obj, d := types.ObjectValue(elemOT.AttrTypes, innerAttrs)
+ diags.Append(d...)
+ if diags.HasError() {
+ return types.ListUnknown(ty.ElemType), diags
+ }
+ lv, d := types.ListValueFrom(ctx, ty.ElemType, []attr.Value{obj})
+ diags.Append(d...)
+ return lv, diags
+ }
+ if _, ok := t.(jsontypes.NormalizedType); ok {
+ s, ok := raw.(string)
+ if !ok {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected JSON string, got %T", raw))
+ return jsontypes.NewNormalizedUnknown(), diags
+ }
+ return jsontypes.NewNormalizedValue(s), diags
+ }
+ if ty, ok := t.(types.ObjectType); ok {
+ // Tolerate a one-element list wrapping the object by unwrapping its
+ // first element; an empty list maps to a null object.
+ if slice, ok := raw.([]any); ok {
+ if len(slice) == 0 {
+ return types.ObjectNull(ty.AttrTypes), diags
+ }
+ var ok2 bool
+ raw, ok2 = slice[0].(map[string]any)
+ if !ok2 {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected object map inside list, got %T", slice[0]))
+ return types.ObjectUnknown(ty.AttrTypes), diags
+ }
+ }
+ m, ok := raw.(map[string]any)
+ if !ok {
+ diags.AddError("Type mismatch", fmt.Sprintf("expected map, got %T", raw))
+ return types.ObjectUnknown(ty.AttrTypes), diags
+ }
+ innerAttrs, d := phaseDataToObjectAttrs(ctx, ty, m)
+ diags.Append(d...)
+ if diags.HasError() {
+ return types.ObjectUnknown(ty.AttrTypes), diags
+ }
+ ov, d := types.ObjectValue(ty.AttrTypes, innerAttrs)
+ diags.Append(d...)
+ return ov, diags
+ }
+ diags.AddError("Internal error", fmt.Sprintf("unsupported attr type %T", t))
+ return types.StringUnknown(), diags
+}
+
+// coerceInt64 converts common numeric representations to int64.
+// encoding/json decodes numbers as float64 by default, so that case is
+// essential; note that the float64 conversion truncates any fractional
+// part rather than rejecting it.
+func coerceInt64(v any) (int64, bool) {
+ switch n := v.(type) {
+ case int:
+ return int64(n), true
+ case int64:
+ return n, true
+ case float64:
+ return int64(n), true
+ case json.Number:
+ i, err := n.Int64()
+ return i, err == nil
+ default:
+ return 0, false
+ }
+}
diff --git a/internal/elasticsearch/index/ilm/versions.go b/internal/elasticsearch/index/ilm/versions.go
new file mode 100644
index 000000000..8cf7280e8
--- /dev/null
+++ b/internal/elasticsearch/index/ilm/versions.go
@@ -0,0 +1,39 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package ilm
+
+import "github.com/hashicorp/go-version"
+
+// currentSchemaVersion is the Terraform resource schema version (state upgrades run from prior versions).
+const currentSchemaVersion int64 = 1
+
+// Keys of the top-level phase blocks as they appear in configuration,
+// state, and the Elasticsearch policy document.
+const (
+ ilmPhaseHot = "hot"
+ ilmPhaseWarm = "warm"
+ ilmPhaseCold = "cold"
+ ilmPhaseFrozen = "frozen"
+ ilmPhaseDelete = "delete"
+)
+
+// RolloverMinConditionsMinSupportedVersion is the minimum Elasticsearch version for rollover min_* conditions.
+var RolloverMinConditionsMinSupportedVersion = version.Must(version.NewVersion("8.4.0"))
+
+// MaxPrimaryShardDocsMinSupportedVersion is the minimum Elasticsearch version for max_primary_shard_docs.
+var MaxPrimaryShardDocsMinSupportedVersion = version.Must(version.NewVersion("8.2.0"))
+
+// supportedIlmPhases enumerates every phase this resource models, in
+// lifecycle order (hot through delete).
+var supportedIlmPhases = [...]string{ilmPhaseHot, ilmPhaseWarm, ilmPhaseCold, ilmPhaseFrozen, ilmPhaseDelete}
diff --git a/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/.openspec.yaml b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/.openspec.yaml
new file mode 100644
index 000000000..2ca4bc851
--- /dev/null
+++ b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/.openspec.yaml
@@ -0,0 +1,2 @@
+schema: spec-driven
+created: 2026-03-24
diff --git a/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/design.md b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/design.md
new file mode 100644
index 000000000..4c79d737f
--- /dev/null
+++ b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/design.md
@@ -0,0 +1,50 @@
+## Context
+
+Canonical requirements for this resource live in [`openspec/specs/elasticsearch-index-lifecycle/spec.md`](../../specs/elasticsearch-index-lifecycle/spec.md). Implementation lives in [`internal/elasticsearch/index/ilm/`](../../../internal/elasticsearch/index/ilm/). Elasticsearch policy expansion today expects each action as a **one-element slice** in the internal map (`expand.go`); the Framework layer can keep producing that shape from **object**-typed action attributes by wrapping in `[]any{m}` inside `attrValueToExpandRaw` (or equivalent).
+
+## Goals / Non-Goals
+
+**Goals:**
+
+- Align Plugin Framework schema with **single nested block** semantics for phases and ILM actions so documentation and Terraform state match SDK “max one” intent.
+- **Migrate existing state** from list-encoded nested values to object-encoded values without manual intervention.
+- Enforce **required-when-present** action fields via **`objectvalidator.AlsoRequires`** after making those attributes optional.
+- Preserve **readonly / freeze / unfollow** flatten semantics that depend on prior state (`priorHasDeclaredToggle`).
+
+**Non-goals:**
+
+- Changing **`elasticsearch_connection`** block type or connection resolution (REQ-009–REQ-010).
+- Changing Elasticsearch API payloads beyond what is needed to preserve behavior after schema/type refactors.
+- Reworking rollover “at least one condition” style rules unless already required by schema (rollover remains all-optional attributes at the Framework level today).
+
+## Decisions
+
+- **SingleNestedBlock** for `hot`, `warm`, `cold`, `frozen`, `delete` and for every ILM action block under those phases; **list nested block** only for `elasticsearch_connection`.
+- **State upgrade**: Raw JSON map walk; unwrap `[]any` with `len >= 1` → first element for **known** keys only (phases at root; action keys per phase; include delete-phase inner `delete` action block name). Empty list → omit / null consistent with new state. **`len > 1`**: use first element (pragmatic recovery from invalid state).
+- **AlsoRequires**: Follow [`internal/kibana/alertingrule/schema.go`](../../../internal/kibana/alertingrule/schema.go) pattern (`path.MatchRelative().AtName(...)`). Attach validators on **`SingleNestedBlock`** where the framework allows; otherwise on **`NestedBlockObject`** if any action stays list-wrapped during transition.
+- **AlsoRequires field set**:
+
+ | Action | Paths |
+ |--------|--------|
+ | `forcemerge` | `max_num_segments` |
+ | `searchable_snapshot` | `snapshot_repository` |
+ | `set_priority` | `priority` |
+ | `wait_for_snapshot` | `policy` |
+ | `downsample` | `fixed_interval` |
+
+## Risks / Trade-offs
+
+- **Acceptance test churn**: All `TestCheckResourceAttr` paths that assume list indices for phases/actions must be updated.
+- **State upgrade bugs**: Missing or mistyped keys could leave stale list-shaped state; mitigate with unit tests and manual upgrade test against a v0 state fixture.
+- **Empty action block**: Validation must fail with a clear diagnostic when AlsoRequires is violated.
+
+## Migration Plan
+
+1. Land implementation and delta spec under this change.
+2. Run `make check-openspec` / `openspec validate` on the change (when CLI available).
+3. After review, **sync** delta into `openspec/specs/elasticsearch-index-lifecycle/spec.md` (or archive per workflow).
+4. Release note: mention state upgrade and doc shape for nested blocks.
+
+## Open Questions
+
+- None.
diff --git a/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/proposal.md b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/proposal.md
new file mode 100644
index 000000000..27239d7c0
--- /dev/null
+++ b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/proposal.md
@@ -0,0 +1,31 @@
+## Why
+
+The Plugin Framework ILM resource models each phase and each ILM action as **list nested blocks** capped at one element (`listvalidator.SizeBetween(0, 1)`). That matches legacy SDK `MaxItems: 1` behavior but **generated documentation** describes them as unbounded “Block List” instead of a single nested object, unlike the SDK resource.
+
+Separately, with **single nested block** semantics, an empty action block (e.g. `forcemerge {}`) is a present object with unset child attributes. Marking those children `Required: true` produces awkward validation compared to **omitting the block**; the provider should use **optional attributes** plus **object-level `AlsoRequires`** so “block present ⇒ required fields set” is enforced without requiring the block when the action is unused.
+
+## What Changes
+
+- **Schema**: Replace list-nested phases and actions with **`SingleNestedBlock`** in `internal/elasticsearch/index/ilm/schema.go` and `schema_actions.go` (remove list max-one validators where obsolete). Keep **`elasticsearch_connection`** as the shared list nested block.
+- **Model / read / expand**: Retype phase and action fields to **`types.Object`** (and object-shaped nested attrs), updating `attr_types.go`, `models.go`, `validate.go`, `policy.go`, `flatten.go`, `value_conv.go`, `model_expand.go` so API expansion and flatten behavior stay equivalent.
+- **State**: Bump resource **schema version**; implement **`ResourceWithUpgradeState`** with JSON migration **v0 → v1** that unwraps list-shaped state only for known phase and action keys (not `elasticsearch_connection`). Add a **unit test** for the upgrader.
+- **Action validation**: For `forcemerge`, `searchable_snapshot`, `set_priority`, `wait_for_snapshot`, and `downsample`, make previously **required** attributes **optional** and attach **`objectvalidator.AlsoRequires`** on the parent action object (or `SingleNestedBlock` validators) for the former required paths.
+- **Tests**: Update **`acc_test.go`** flat state attribute paths (remove `.0` segments for single-nested values); leave **`testdata/**/*.tf`** unchanged.
+- **Docs**: Regenerate Terraform resource docs after schema change.
+- **OpenSpec**: Delta under `specs/elasticsearch-index-lifecycle/spec.md` adds normative requirements for the above; sync into `openspec/specs/elasticsearch-index-lifecycle/spec.md` when the change is applied.
+
+## Capabilities
+
+### New Capabilities
+
+- _(none)_
+
+### Modified Capabilities
+
+- **`elasticsearch-index-lifecycle`**: Schema shape (single nested blocks), state upgrade, action validation rules, and spec text aligned with implementation.
+
+## Impact
+
+- **Users**: Saved state upgrades automatically on first apply after upgrade; HCL using `hot { }` / `forcemerge { ... }` remains valid. Flat state keys in `terraform show` and in tests change (no `hot.0.*` list indices for phases/actions).
+- **Code**: `internal/elasticsearch/index/ilm/` (schema, model, flatten, expand helpers, resource, new `state_upgrade.go`, tests).
+- **Maintenance**: Clearer generated docs (“nested block” vs list); behavior matches legacy SDK cardinality.
diff --git a/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/specs/elasticsearch-index-lifecycle/spec.md b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/specs/elasticsearch-index-lifecycle/spec.md
new file mode 100644
index 000000000..334a74960
--- /dev/null
+++ b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/specs/elasticsearch-index-lifecycle/spec.md
@@ -0,0 +1,73 @@
+## ADDED Requirements
+
+### Requirement: Single nested blocks for phases and actions (REQ-020)
+
+The resource SHALL model each of the phase blocks `hot`, `warm`, `cold`, `frozen`, and `delete` as a **Plugin Framework `SingleNestedBlock`** (at most one block per phase in configuration; state stores a single nested object or null when absent), not as a list nested block with a maximum length of one.
+
+Each ILM action block allowed under a phase (for example `set_priority`, `rollover`, `forcemerge`, `searchable_snapshot`, `wait_for_snapshot`, `delete`, and other actions defined by the provider schema) SHALL likewise be modeled as a **`SingleNestedBlock`**.
+
+The **`elasticsearch_connection`** block SHALL remain a **list nested block** as provided by the shared provider connection schema.
+
+#### Scenario: Phase block cardinality
+
+- GIVEN a Terraform configuration for this resource
+- WHEN the user declares a phase (for example `hot { ... }`)
+- THEN the schema SHALL allow at most one such block for that phase and SHALL persist that phase as an object-shaped value in state, not as a single-element list
+
+#### Scenario: Action block cardinality
+
+- GIVEN a phase that supports an ILM action block
+- WHEN the user declares that action (for example `forcemerge { ... }`)
+- THEN the schema SHALL allow at most one such block and SHALL persist it as an object-shaped value in state, not as a single-element list
+
+### Requirement: State schema version and upgrade (REQ-021)
+
+The resource SHALL use a **non-zero** `schema.Schema.Version` for this resource type after this change.
+
+The resource SHALL implement **`ResourceWithUpgradeState`** and SHALL migrate stored Terraform state from the **prior version** (list-shaped nested values for phases and ILM actions) to the **new version** (object-shaped nested values) for the same logical configuration.
+
+The migration SHALL unwrap list-encoded values **only** for known ILM phase keys and known ILM action keys under those phases (including the delete-phase ILM action block named `delete`). The migration SHALL **not** alter the encoding of **`elasticsearch_connection`**.
+
+#### Scenario: Upgrade from list-shaped phase state
+
+- GIVEN persisted state where a phase is stored as a JSON array containing one object
+- WHEN Terraform loads state and runs the state upgrader
+- THEN the upgraded state SHALL store that phase as a single object (or equivalent null) consistent with `SingleNestedBlock` semantics
+
+#### Scenario: Connection block unchanged by upgrade
+
+- GIVEN persisted state that includes `elasticsearch_connection` as a list
+- WHEN the state upgrader runs
+- THEN the `elasticsearch_connection` value SHALL remain list-shaped as defined by the connection schema
+
+### Requirement: Action fields optional with object-level AlsoRequires (REQ-022)
+
+For the ILM action blocks **`forcemerge`**, **`searchable_snapshot`**, **`set_priority`**, **`wait_for_snapshot`**, and **`downsample`**, each attribute that is **required for API correctness when the action is declared** SHALL be **optional** at the Terraform attribute level (so an entirely omitted action block does not force those attributes to appear).
+
+When the user **declares** one of these action blocks in configuration, validation SHALL require that all of the following previously required attributes are set (non-null), using object-level validation equivalent to **`objectvalidator.AlsoRequires`**:
+
+- **`forcemerge`**: `max_num_segments`
+- **`searchable_snapshot`**: `snapshot_repository`
+- **`set_priority`**: `priority`
+- **`wait_for_snapshot`**: `policy`
+- **`downsample`**: `fixed_interval`
+
+Existing attribute-level validators (for example minimum values) SHALL remain on those attributes where applicable.
+
+#### Scenario: Omitted action block is valid
+
+- GIVEN a phase without a particular action block (for example no `forcemerge` block)
+- WHEN Terraform validates configuration
+- THEN validation SHALL NOT fail solely because `max_num_segments` is unset
+
+#### Scenario: Empty action block is invalid
+
+- GIVEN the user declares `forcemerge { }` with no attributes
+- WHEN Terraform validates configuration
+- THEN validation SHALL fail with a diagnostic indicating the required fields when the block is present
+
+#### Scenario: Searchable snapshot requires repository when present
+
+- GIVEN the user declares `searchable_snapshot { force_merge_index = true }` without `snapshot_repository`
+- WHEN Terraform validates configuration
+- THEN validation SHALL fail with a diagnostic
diff --git a/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/tasks.md b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/tasks.md
new file mode 100644
index 000000000..85cb602a3
--- /dev/null
+++ b/openspec/changes/archive/2026-03-24-ilm-single-nested-and-action-validation/tasks.md
@@ -0,0 +1,28 @@
+## 1. Schema and validation
+
+- [x] 1.1 Convert phase blocks in `schema.go` to `SingleNestedBlock`; set `schema.Schema.Version` to `1`.
+- [x] 1.2 Convert action helpers in `schema_actions.go` to `SingleNestedBlock`; remove `listBlockSingle` / list validators where obsolete.
+- [x] 1.3 For `forcemerge`, `searchable_snapshot`, `set_priority`, `wait_for_snapshot`, `downsample`: make former required attributes **optional**; add **`objectvalidator.AlsoRequires`** for each former required field on the parent action object / block.
+- [x] 1.4 Keep `elasticsearch_connection` as the shared list nested block from `internal/schema/connection.go`.
+
+## 2. Model, flatten, expand
+
+- [x] 2.1 Update `attr_types.go`: action fields as object types (not `ListType` wrappers); phase top-level as object types.
+- [x] 2.2 Update `models.go`, `validate.go`, `policy.go`, `flatten.go`, `value_conv.go`, `model_expand.go` for `types.Object` phases and object action attributes; preserve `expand.go` contract via wrapping single objects as `[]any{m}` where needed.
+- [x] 2.3 Preserve toggle semantics for `readonly` / `freeze` / `unfollow` in `flatten.go` (`priorHasDeclaredToggle`).
+
+## 3. State upgrade
+
+- [x] 3.1 Implement `ResourceWithUpgradeState` on the ILM resource; map `0 → migrateV0ToV1` JSON transform (key-aware unwrap only).
+- [x] 3.2 Add `state_upgrade.go` (or equivalent) and unit test `TestILMResourceUpgradeState` with v0- and v1-shaped fixtures.
+
+## 4. Tests and docs
+
+- [x] 4.1 Update `internal/elasticsearch/index/ilm/acc_test.go` attribute paths for single-nested state (no `.0` for phase/action segments); do not edit `testdata/**/*.tf` HCL.
+- [x] 4.2 Regenerate `docs/resources/elasticsearch_index_lifecycle.md` (or project doc target).
+- [x] 4.3 Run `make build` and targeted ILM acceptance tests.
+
+## 5. OpenSpec
+
+- [x] 5.1 Keep delta spec `openspec/changes/ilm-single-nested-and-action-validation/specs/elasticsearch-index-lifecycle/spec.md` aligned with implementation.
+- [ ] 5.2 After merge decision: **sync** into `openspec/specs/elasticsearch-index-lifecycle/spec.md` or **archive** the change per project workflow; run `make check-openspec`.
diff --git a/openspec/specs/elasticsearch-index-lifecycle/spec.md b/openspec/specs/elasticsearch-index-lifecycle/spec.md
new file mode 100644
index 000000000..f7d91bac3
--- /dev/null
+++ b/openspec/specs/elasticsearch-index-lifecycle/spec.md
@@ -0,0 +1,448 @@
+# `elasticstack_elasticsearch_index_lifecycle` — Schema and Functional Requirements
+
+Resource implementation: `internal/elasticsearch/index/ilm/`
+
+## Purpose
+
+Define the Terraform schema and runtime behavior for managing **Elasticsearch index lifecycle management (ILM) policies**: creating and updating policies on the cluster, reading them into state (including refresh and drift detection), deleting them, importing existing policies, choosing the Elasticsearch connection, and enforcing **server-version gates** for ILM action fields that only exist on newer Elasticsearch releases.
+
+## Schema
+
+### Top-level attributes
+
+```hcl
+resource "elasticstack_elasticsearch_index_lifecycle" "example" {
+  id            = <string>  # computed; <cluster_uuid>/<policy_name>
+  name          = <string>  # policy identifier; force new
+  metadata      = <string>  # valid JSON; normalized diff
+  modified_date = <string>  # last modification time from the cluster
+
+ # At least one of hot, warm, cold, frozen, delete MUST be set (schema: AtLeastOneOf).
+ # Each phase is a Plugin Framework SingleNestedBlock (at most one block per phase; state stores a single object or null).
+ hot { /* phase_hot */ }
+ warm { /* phase_warm */ }
+ cold { /* phase_cold */ }
+ frozen { /* phase_frozen */ }
+ delete { /* phase_delete */ }
+
+ elasticsearch_connection {
+    endpoints                = <list of string>
+    username                 = <string>
+    password                 = <string>
+    api_key                  = <string>
+    bearer_token             = <string>
+    es_client_authentication = <string>
+    insecure                 = <bool>
+    headers                  = <map of string>
+    ca_file                  = <string>
+    ca_data                  = <string>
+    cert_file                = <string>
+    key_file                 = <string>
+    cert_data                = <string>
+    key_data                 = <string>
+ }
+}
+```
+
+In Terraform configuration, each phase is written as a **`SingleNestedBlock`** (for example `hot { ... }`). State stores that phase as an object-shaped value (or null when absent), not as a single-element list.
+
+### Per-phase object (common)
+
+Every phase object MAY include:
+
+| Attribute | Constraint | Notes |
+|-----------|--------------|--------|
+| `min_age` | optional + computed, string | Minimum age before entering this phase; may be populated from the cluster on read. |
+
+### Allowed nested actions by phase
+
+| Phase | Nested action blocks (each is a **`SingleNestedBlock`**) |
+|-------|-----------------------------------------------------------------------------|
+| **hot** | `set_priority`, `unfollow`, `rollover`, `readonly`, `shrink`, `forcemerge`, `searchable_snapshot`, `downsample` |
+| **warm** | `set_priority`, `unfollow`, `readonly`, `allocate`, `migrate`, `shrink`, `forcemerge`, `downsample` |
+| **cold** | `set_priority`, `unfollow`, `readonly`, `searchable_snapshot`, `allocate`, `migrate`, `freeze`, `downsample` |
+| **frozen** | `searchable_snapshot` only (plus `min_age`) |
+| **delete** | `wait_for_snapshot`, `delete` (the ILM delete action; plus `min_age`) |
+
+### Nested action block schemas
+
+Each action below is expressed as Terraform nested block syntax. All such blocks are **optional** and use **`SingleNestedBlock`** semantics (`action { ... }`); state stores each declared action as an object, not as a list.
+
+```hcl
+# allocate — warm, cold only
+allocate {
+  number_of_replicas    = <number>
+  total_shards_per_node = <number>  # ES >= 7.16 when non-default
+  include               = <string>  # JSON object as string; normalized diff
+  exclude               = <string>
+  require               = <string>
+}
+
+# delete — delete phase only (ILM action that removes the index)
+delete {
+  delete_searchable_snapshot = <bool>
+}
+
+# forcemerge — hot, warm only
+# When the block is omitted, max_num_segments is not required. When the block is declared, max_num_segments is required (object-level AlsoRequires).
+forcemerge {
+  max_num_segments = <number >= 1>  # required when block is present
+  index_codec      = <string>
+}
+
+# freeze — cold only
+freeze {
+  enabled = <bool>  # when false, action omitted from API (see requirements)
+}
+
+# migrate — warm, cold only
+migrate {
+  enabled = <bool>
+}
+
+# readonly — hot, warm, cold only
+readonly {
+  enabled = <bool>
+}
+
+# rollover — hot only
+rollover {
+  max_age                = <string>
+  max_docs               = <number>
+  max_size               = <string>
+  max_primary_shard_docs = <number>  # ES >= 8.2 when non-default
+  max_primary_shard_size = <string>
+  min_age                = <string>  # ES >= 8.4 when non-default
+  min_docs               = <number>  # ES >= 8.4 when non-default
+  min_size               = <string>
+  min_primary_shard_docs = <number>  # ES >= 8.4 when non-default
+  min_primary_shard_size = <string>  # ES >= 8.4 when non-default
+}
+
+# searchable_snapshot — hot, cold, frozen only
+# snapshot_repository required when block is present (object-level AlsoRequires).
+searchable_snapshot {
+  snapshot_repository = <string>  # required when block is present
+  force_merge_index   = <bool>
+}
+
+# set_priority — hot, warm, cold only
+# priority required when block is present (object-level AlsoRequires).
+set_priority {
+  priority = <number >= 0>  # required when block is present; index recovery priority for this phase
+}
+
+# shrink — hot, warm only
+shrink {
+  number_of_shards         = <number>
+  max_primary_shard_size   = <string>
+  allow_write_after_shrink = <bool>  # ES >= 8.14 when non-default
+}
+
+# unfollow — hot, warm, cold only
+unfollow {
+  enabled = <bool>
+}
+
+# wait_for_snapshot — delete phase only
+# policy required when block is present (object-level AlsoRequires).
+wait_for_snapshot {
+  policy = <string>  # required when block is present; SLM policy name to wait for
+}
+
+# downsample — hot, warm, cold only
+# fixed_interval required when block is present (object-level AlsoRequires).
+downsample {
+  fixed_interval = <string>  # required when block is present
+  wait_timeout   = <string>  # may be set by the cluster on read
+}
+```
+
+### Example: fully expanded phase shapes (illustrative)
+
+Each phase is one `SingleNestedBlock` (e.g. `hot { min_age = "1h" ... }`).
+
+```hcl
+  hot {
+    min_age = <string>
+
+    set_priority { priority = <number> }
+    unfollow     { enabled = <bool> }
+    rollover {
+      max_age = <string>
+      # ... all rollover fields per table above
+    }
+    readonly { enabled = <bool> }
+    shrink {
+      number_of_shards         = <number>
+      max_primary_shard_size   = <string>
+      allow_write_after_shrink = <bool>
+    }
+    forcemerge {
+      max_num_segments = <number>
+      index_codec      = <string>
+    }
+    searchable_snapshot {
+      snapshot_repository = <string>
+      force_merge_index   = <bool>
+    }
+    downsample {
+      fixed_interval = <string>
+      wait_timeout   = <string>
+    }
+  }
+
+  warm {
+    min_age = <string>
+    set_priority { ... }
+    unfollow { ... }
+    readonly { ... }
+    allocate { ... }
+    migrate { ... }
+    shrink { ... }
+    forcemerge { ... }
+    downsample { ... }
+  }
+
+  cold {
+    min_age = <string>
+    set_priority { ... }
+    unfollow { ... }
+    readonly { ... }
+    searchable_snapshot { ... }
+    allocate { ... }
+    migrate { ... }
+    freeze { ... }
+    downsample { ... }
+  }
+
+  frozen {
+    min_age = <string>
+    searchable_snapshot {
+      snapshot_repository = <string>
+      force_merge_index   = <bool>
+    }
+  }
+
+  delete {
+    min_age = <string>
+    wait_for_snapshot { policy = <string> }
+    delete { delete_searchable_snapshot = <bool> }
+  }
+```
+
+## Requirements
+
+### Requirement: ILM policy CRUD APIs (REQ-001–REQ-003)
+
+The resource SHALL use the Elasticsearch **Put lifecycle policy** API to create and update ILM policies ([Put lifecycle API](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html)). The resource SHALL use the **Get lifecycle policy** API to read policies ([Get lifecycle API](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html)). The resource SHALL use the **Delete lifecycle policy** API to delete policies ([Delete lifecycle API](https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html)).
+
+#### Scenario: Documented APIs for lifecycle operations
+
+- GIVEN an ILM policy managed by this resource
+- WHEN create, update, read, or delete runs
+- THEN the provider SHALL call the Put, Get, and Delete lifecycle APIs as documented
+
+### Requirement: API error surfacing (REQ-004)
+
+When Elasticsearch returns a non-success response for create, update, or delete, or for read when the response is not a successful retrieval (excluding **not found** on read as specified elsewhere), the resource SHALL surface the error in Terraform diagnostics.
+
+#### Scenario: Non-success response
+
+- GIVEN an Elasticsearch error on create, update, read (other than not found), or delete
+- WHEN the provider handles the response
+- THEN the error SHALL appear in diagnostics
+
+### Requirement: Identity (REQ-005–REQ-006)
+
+The resource SHALL expose a computed `id` in the format `<cluster_uuid>/<policy_name>`. After a successful create or update, the resource SHALL set `id` using the target cluster identifier and the configured policy `name`.
+
+#### Scenario: Computed id after apply
+
+- GIVEN a successful create or update
+- WHEN state is written
+- THEN `id` SHALL equal `<cluster_uuid>/<policy_name>` for the connected cluster and configured name
+
+### Requirement: Import (REQ-007)
+
+The resource SHALL support import using **passthrough** of the stored `id` (no custom import logic). The imported value SHALL be the same composite `id` format used in state.
+
+#### Scenario: Import by id
+
+- GIVEN an import id in the form `<cluster_uuid>/<policy_name>`
+- WHEN import completes
+- THEN state SHALL retain that `id` for subsequent read
+
+### Requirement: Policy name lifecycle (REQ-008)
+
+When the `name` argument changes, the resource SHALL require **replacement** (new policy identity), not an in-place rename via the same resource instance.
+
+#### Scenario: Renaming a policy
+
+- GIVEN a planned change to `name`
+- WHEN Terraform evaluates the resource
+- THEN replacement SHALL be required
+
+### Requirement: Elasticsearch connection (REQ-009–REQ-010)
+
+By default, the resource SHALL use the provider-configured Elasticsearch client. When `elasticsearch_connection` is set, the resource SHALL use a **resource-scoped** Elasticsearch client for all API calls for that instance.
+
+#### Scenario: Override connection
+
+- GIVEN `elasticsearch_connection` is configured
+- WHEN create, read, update, or delete runs
+- THEN API calls SHALL use the connection defined by that block
+
+### Requirement: Create and update flow (REQ-011)
+
+Create and update SHALL both **put** the full policy definition derived from configuration, then **read** the policy back into state so computed fields and cluster-returned values are refreshed.
+
+#### Scenario: Read after write
+
+- GIVEN a successful put
+- WHEN create or update finishes
+- THEN read logic SHALL run to populate state
+
+### Requirement: Read and absent policy (REQ-012–REQ-013)
+
+Read SHALL parse `id` as a composite identifier; if the format is invalid, the resource SHALL return an error diagnostic. When the lifecycle API indicates the policy **does not exist**, the resource SHALL **remove the resource from state** (empty `id`) and SHALL log a warning that the policy was not found.
+
+#### Scenario: Policy removed outside Terraform
+
+- GIVEN the policy was deleted on the cluster
+- WHEN refresh runs
+- THEN the resource SHALL be removed from state and SHALL not fail with a hard error solely due to absence
+
+#### Scenario: Invalid stored id
+
+- GIVEN `id` is not of the form `<cluster_uuid>/<policy_name>`
+- WHEN read or delete parses `id`
+- THEN the provider SHALL return an error diagnostic describing the required format
+
+### Requirement: Delete (REQ-014)
+
+Delete SHALL derive the policy name from the composite `id` and SHALL call the delete lifecycle API for that name.
+
+#### Scenario: Delete uses policy name from id
+
+- GIVEN a valid `id` in state
+- WHEN delete runs
+- THEN the delete API SHALL be invoked for the policy name portion of `id`
+
+### Requirement: Phase and metadata validation (REQ-015–REQ-016)
+
+The resource SHALL require **at least one** of the phase blocks `hot`, `warm`, `cold`, `frozen`, or `delete`. The resource SHALL accept `metadata` only if it is **valid JSON**. JSON-valued allocation attributes (`include`, `exclude`, `require`) SHALL be valid JSON; where used, the provider SHALL apply **JSON-aware diff suppression** so equivalent objects do not churn the plan solely due to formatting.
+
+#### Scenario: No phase defined
+
+- GIVEN none of the phase blocks are set
+- WHEN Terraform validates configuration
+- THEN validation SHALL fail (at least one phase required)
+
+### Requirement: Server version compatibility for optional ILM fields (REQ-017)
+
+For ILM action settings that are only supported starting at a **minimum Elasticsearch version**, the resource SHALL compare the **connected server version** to that minimum when expanding configuration into the API model. If the server is **older** than the required version and the user has set a **non-default** value for that setting, the resource SHALL fail with a diagnostic that instructs removal of the setting or use of the default. If the value equals the default, the resource SHALL **omit** sending that unsupported setting in the policy payload.
+
+#### Scenario: Rollover min conditions on old cluster
+
+- GIVEN Elasticsearch < 8.4 and rollover **min_**\* conditions are set to non-default values
+- WHEN create or update expands the policy
+- THEN the provider SHALL return an error diagnostic
+
+#### Scenario: Allocate total_shards_per_node on old cluster
+
+- GIVEN Elasticsearch < 7.16 and `total_shards_per_node` is set to a non-default value
+- WHEN create or update expands the allocate action
+- THEN the provider SHALL return an error diagnostic
+
+### Requirement: Mapping for togglable actions (REQ-018)
+
+For actions **readonly**, **freeze**, and **unfollow**, the resource SHALL send the action to Elasticsearch only when **`enabled` is true**. When **`enabled` is false** but the user still declares the block (so Terraform can express “disabled”), read/flatten SHALL map state in a way that preserves that intent without falsely implying the action is active.
+
+#### Scenario: Disabled readonly block retained in config
+
+- GIVEN the user sets `readonly { enabled = false }` in a phase
+- WHEN state is refreshed from the API
+- THEN configuration SHALL be able to represent the disabled case without spurious enabled=true drift (per provider flatten rules)
+
+### Requirement: Unknown phase actions (REQ-019)
+
+If expansion encounters an action key that is not supported by the provider’s mapping for that phase, the resource SHALL fail with an error diagnostic indicating the action is not supported.
+
+#### Scenario: Unexpected action in expanded phase map
+
+- GIVEN an internal expansion path surfaces an unknown action name
+- WHEN the policy is expanded
+- THEN the provider SHALL return a diagnostic
+
+### Requirement: Single nested blocks for phases and actions (REQ-020)
+
+The resource SHALL model each of the phase blocks `hot`, `warm`, `cold`, `frozen`, and `delete` as a **Plugin Framework `SingleNestedBlock`** (at most one block per phase in configuration; state stores a single nested object or null when absent), not as a list nested block with a maximum length of one.
+
+Each ILM action block allowed under a phase (for example `set_priority`, `rollover`, `forcemerge`, `searchable_snapshot`, `wait_for_snapshot`, `delete`, and other actions defined by the provider schema) SHALL likewise be modeled as a **`SingleNestedBlock`**.
+
+The **`elasticsearch_connection`** block SHALL remain a **list nested block** as provided by the shared provider connection schema.
+
+#### Scenario: Phase block cardinality
+
+- GIVEN a Terraform configuration for this resource
+- WHEN the user declares a phase (for example `hot { ... }`)
+- THEN the schema SHALL allow at most one such block for that phase and SHALL persist that phase as an object-shaped value in state, not as a single-element list
+
+#### Scenario: Action block cardinality
+
+- GIVEN a phase that supports an ILM action block
+- WHEN the user declares that action (for example `forcemerge { ... }`)
+- THEN the schema SHALL allow at most one such block and SHALL persist it as an object-shaped value in state, not as a single-element list
+
+### Requirement: State schema version and upgrade (REQ-021)
+
+The resource SHALL use a **non-zero** `schema.Schema.Version` for this resource type after this change.
+
+The resource SHALL implement **`ResourceWithUpgradeState`** and SHALL migrate stored Terraform state from the **prior version** (list-shaped nested values for phases and ILM actions) to the **new version** (object-shaped nested values) for the same logical configuration.
+
+The migration SHALL unwrap list-encoded values **only** for known ILM phase keys and known ILM action keys under those phases (including the delete-phase ILM action block named `delete`). The migration SHALL **not** alter the encoding of **`elasticsearch_connection`**.
+
+#### Scenario: Upgrade from list-shaped phase state
+
+- GIVEN persisted state where a phase is stored as a JSON array containing one object
+- WHEN Terraform loads state and runs the state upgrader
+- THEN the upgraded state SHALL store that phase as a single object (or equivalent null) consistent with `SingleNestedBlock` semantics
+
+#### Scenario: Connection block unchanged by upgrade
+
+- GIVEN persisted state that includes `elasticsearch_connection` as a list
+- WHEN the state upgrader runs
+- THEN the `elasticsearch_connection` value SHALL remain list-shaped as defined by the connection schema
+
+### Requirement: Action fields optional with object-level AlsoRequires (REQ-022)
+
+For the ILM action blocks **`forcemerge`**, **`searchable_snapshot`**, **`set_priority`**, **`wait_for_snapshot`**, and **`downsample`**, each attribute that is **required for API correctness when the action is declared** SHALL be **optional** at the Terraform attribute level (so an entirely omitted action block does not force those attributes to appear).
+
+When the user **declares** one of these action blocks in configuration, validation SHALL require that all of the following previously required attributes are set (non-null), using object-level validation equivalent to **`objectvalidator.AlsoRequires`**:
+
+- **`forcemerge`**: `max_num_segments`
+- **`searchable_snapshot`**: `snapshot_repository`
+- **`set_priority`**: `priority`
+- **`wait_for_snapshot`**: `policy`
+- **`downsample`**: `fixed_interval`
+
+Existing attribute-level validators (for example minimum values) SHALL remain on those attributes where applicable.
+
+#### Scenario: Omitted action block is valid
+
+- GIVEN a phase without a particular action block (for example no `forcemerge` block)
+- WHEN Terraform validates configuration
+- THEN validation SHALL NOT fail solely because `max_num_segments` is unset
+
+#### Scenario: Empty action block is invalid
+
+- GIVEN the user declares `forcemerge { }` with no attributes
+- WHEN Terraform validates configuration
+- THEN validation SHALL fail with a diagnostic indicating the required fields when the block is present
+
+#### Scenario: Searchable snapshot requires repository when present
+
+- GIVEN the user declares `searchable_snapshot { force_merge_index = true }` without `snapshot_repository`
+- WHEN Terraform validates configuration
+- THEN validation SHALL fail with a diagnostic
diff --git a/provider/plugin_framework.go b/provider/plugin_framework.go
index e9012ccd3..9d572d7c5 100644
--- a/provider/plugin_framework.go
+++ b/provider/plugin_framework.go
@@ -11,6 +11,7 @@ import (
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/enrich"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/alias"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/datastreamlifecycle"
+ "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/ilm"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/index"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/indices"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/templateilmattachment"
@@ -148,6 +149,7 @@ func (p *Provider) resources(_ context.Context, validateLocation bool) []func()
func() resource.Resource { return monitor.NewResource(validateLocation) },
func() resource.Resource { return &apikey.Resource{} },
func() resource.Resource { return &datastreamlifecycle.Resource{} },
+ ilm.NewResource,
func() resource.Resource { return &connectors.Resource{} },
agentpolicy.NewResource,
integration.NewResource,
diff --git a/provider/provider.go b/provider/provider.go
index 6361d77d0..4b9c66b73 100644
--- a/provider/provider.go
+++ b/provider/provider.go
@@ -84,7 +84,6 @@ func New(version string) *schema.Provider {
"elasticstack_elasticsearch_cluster_settings": cluster.ResourceSettings(),
"elasticstack_elasticsearch_component_template": index.ResourceComponentTemplate(),
"elasticstack_elasticsearch_data_stream": index.ResourceDataStream(),
- "elasticstack_elasticsearch_index_lifecycle": index.ResourceIlm(),
"elasticstack_elasticsearch_index_template": index.ResourceTemplate(),
"elasticstack_elasticsearch_ingest_pipeline": ingest.ResourceIngestPipeline(),
"elasticstack_elasticsearch_logstash_pipeline": logstash.ResourceLogstashPipeline(),