Commit

Merge branch 'main' into main

wrmedford authored Dec 24, 2024
2 parents 537d1bf + 8aa8b4d commit 57afdf5
Showing 8 changed files with 49 additions and 117 deletions.
3 changes: 3 additions & 0 deletions .changelog/2650.txt
@@ -0,0 +1,3 @@
```release-note:bug
Revert "Handle Jobs with ttl_seconds_after_finished = 0 correctly"
```
3 changes: 3 additions & 0 deletions .changelog/2653.txt
@@ -0,0 +1,3 @@
```release-note:bug
`kubernetes_daemon_set_v1`: fix issue where fields `spec.strategy.rolling_update.max_surge` and `spec.strategy.rolling_update.max_unavailable` were not being validated correctly.
```
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,10 @@
## 2.35.1 (Dec 20, 2024)

BUG FIXES:

* `resource/kubernetes_job_v1`: revert the changes introduced in v2.34.0, where `ttl_seconds_after_finished` was set to `0`. [[GH-2650](https://github.com/hashicorp/terraform-provider-kubernetes/issues/2650)]
* `resource/kubernetes_daemon_set_v1`: fix issue where fields `spec.strategy.rolling_update.max_surge` and `spec.strategy.rolling_update.max_unavailable` were not being validated correctly. [[GH-2653](https://github.com/hashicorp/terraform-provider-kubernetes/issues/2653)]

## 2.35.0 (Dec 12, 2024)

FEATURES:
4 changes: 2 additions & 2 deletions kubernetes/resource_kubernetes_daemon_set_v1.go
@@ -109,14 +109,14 @@ func resourceKubernetesDaemonSetSchemaV1() map[string]*schema.Schema {
Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictionsduring disruption.",
Optional: true,
Default: 0,
-ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]%|100%)$`), ""),
+ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`), ""),
},
"max_unavailable": {
Type: schema.TypeString,
Description: "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
Optional: true,
Default: 1,
-ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]%|100%)$`), ""),
+ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`), ""),
},
},
},
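For reference, a minimal standalone Go sketch (not part of this commit) contrasting the two patterns above. The old percentage branch `[1-9][0-9]%` requires exactly two digits before the percent sign, so single-digit values such as `5%` failed validation; the new `[1-9][0-9]?%` branch makes the second digit optional:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old pattern: the percentage branch requires exactly two digits,
	// so "5%" never matched and was rejected by the ValidateFunc.
	oldRe := regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]%|100%)$`)
	// New pattern: the optional second digit accepts "1%" through "99%",
	// alongside "100%" and plain non-negative integers.
	newRe := regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`)

	for _, v := range []string{"0", "5", "10", "100", "5%", "10%", "100%"} {
		fmt.Printf("%-5s old=%-5v new=%v\n", v, oldRe.MatchString(v), newRe.MatchString(v))
	}
}
```

Only `5%` differs between the two patterns, which is exactly the value exercised by the new `5%` test step added below.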
32 changes: 30 additions & 2 deletions kubernetes/resource_kubernetes_daemon_set_v1_test.go
@@ -453,10 +453,31 @@ func TestAccKubernetesDaemonSetV1_MaxSurge(t *testing.T) {
),
},
{
-Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "2"),
+Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "5"),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
-resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "2"),
+resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "5"),
),
},
+{
+Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "10"),
+Check: resource.ComposeAggregateTestCheckFunc(
+testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
+resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "10"),
+),
+},
+{
+Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "100"),
+Check: resource.ComposeAggregateTestCheckFunc(
+testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
+resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "100"),
+),
+},
+{
+Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "5%"),
+Check: resource.ComposeAggregateTestCheckFunc(
+testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
+resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "5%"),
+),
+},
{
@@ -466,6 +487,13 @@
resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "10%"),
),
},
+{
+Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "100%"),
+Check: resource.ComposeAggregateTestCheckFunc(
+testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
+resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "100%"),
+),
+},
},
})
}
14 changes: 3 additions & 11 deletions kubernetes/resource_kubernetes_job_v1.go
@@ -118,17 +118,8 @@ func resourceKubernetesJobV1Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
return diag.FromErr(err)
}
if !exists {
-// Check if ttl_seconds_after_finished is set
-if ttl, ok := d.GetOk("spec.0.ttl_seconds_after_finished"); ok {
-// ttl_seconds_after_finished is set, Job is deleted due to TTL
-// We don't need to remove the resource from the state
-log.Printf("[INFO] Job %s has been deleted by Kubernetes due to TTL (ttl_seconds_after_finished = %v), keeping resource in state", d.Id(), ttl)
-return diag.Diagnostics{}
-} else {
-// ttl_seconds_after_finished is not set, remove the resource from the state
-d.SetId("")
-return diag.Diagnostics{}
-}
+d.SetId("")
+return diag.Diagnostics{}
}
conn, err := meta.(KubeClientsets).MainClientset()
if err != nil {
@@ -213,6 +204,7 @@ func resourceKubernetesJobV1Update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
}
return resourceKubernetesJobV1Read(ctx, d, meta)
}
+
func resourceKubernetesJobV1Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
conn, err := meta.(KubeClientsets).MainClientset()
if err != nil {
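Context for the revert above: Kubernetes itself garbage-collects a finished Job once `spec.ttlSecondsAfterFinished` elapses, so with the restored logic a TTL-deleted Job is simply dropped from state on the next read. A minimal sketch of a Job spec carrying a TTL, using the `k8s.io/api/batch/v1` types already imported by the tests (the `jobWithTTL` helper is illustrative only):

```go
package main

import (
	batchv1 "k8s.io/api/batch/v1"
)

// jobWithTTL returns a Job whose object is garbage-collected by the
// Kubernetes TTL controller ttl seconds after it finishes; a TTL of 0
// makes the Job eligible for deletion as soon as it completes.
func jobWithTTL(ttl int32) *batchv1.Job {
	return &batchv1.Job{
		Spec: batchv1.JobSpec{
			TTLSecondsAfterFinished: &ttl,
		},
	}
}

func main() {
	_ = jobWithTTL(0)
}
```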
101 changes: 0 additions & 101 deletions kubernetes/resource_kubernetes_job_v1_test.go
@@ -237,82 +237,6 @@ func TestAccKubernetesJobV1_ttl_seconds_after_finished(t *testing.T) {
})
}

func TestAccKubernetesJobV1_customizeDiff_ttlZero(t *testing.T) {
var conf batchv1.Job
name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10))
imageName := busyboxImage
resourceName := "kubernetes_job_v1.test"

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
skipIfClusterVersionLessThan(t, "1.21.0")
},
ProviderFactories: testAccProviderFactories,
Steps: []resource.TestStep{
// Step 1: Create the Job
{
Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesJobV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.ttl_seconds_after_finished", "0"),
),
},
// Step 2: Wait for the Job to complete and be deleted
{
PreConfig: func() {
time.Sleep(30 * time.Second)
},
Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0),
PlanOnly: true,
ExpectNonEmptyPlan: false,
},
},
})
}

func TestAccKubernetesJobV1_updateTTLFromZero(t *testing.T) {
var conf batchv1.Job
name := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10))
imageName := busyboxImage
resourceName := "kubernetes_job_v1.test"

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() {
testAccPreCheck(t)
skipIfClusterVersionLessThan(t, "1.21.0")
},
ProviderFactories: testAccProviderFactories,
Steps: []resource.TestStep{
// Step 1: Create the Job with ttl_seconds_after_finished = 0
{
Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesJobV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.ttl_seconds_after_finished", "0"),
),
},
// Step 2: Wait for the Job to complete and be deleted
{
PreConfig: func() {
time.Sleep(30 * time.Second)
},
Config: testAccKubernetesJobV1Config_Diff(name, imageName, 0),
PlanOnly: true,
ExpectNonEmptyPlan: false,
},
// Step 3: Update the Job to ttl_seconds_after_finished = 5
{
Config: testAccKubernetesJobV1Config_Diff(name, imageName, 5),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesJobV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.ttl_seconds_after_finished", "5"),
),
},
},
})
}

func testAccCheckJobV1Waited(minDuration time.Duration) func(*terraform.State) error {
// NOTE this works because this function is called when setting up the test
// and the function it returns is called after the resource has been created
@@ -592,28 +516,3 @@ func testAccKubernetesJobV1Config_modified(name, imageName string) string {
wait_for_completion = false
}`, name, imageName)
}

func testAccKubernetesJobV1Config_Diff(name, imageName string, ttl int) string {
return fmt.Sprintf(`
resource "kubernetes_job_v1" "test" {
metadata {
name = "%s"
}
spec {
ttl_seconds_after_finished = %d
template {
metadata {}
spec {
container {
name = "wait-test"
image = "%s"
command = ["sleep", "20"]
}
restart_policy = "Never"
}
}
}
wait_for_completion = false
}
`, name, ttl, imageName)
}
2 changes: 1 addition & 1 deletion kubernetes/schema_job_spec.go
@@ -235,7 +235,7 @@ func jobSpecFields(specUpdatable bool) map[string]*schema.Schema {
"ttl_seconds_after_finished": {
Type: schema.TypeString,
Optional: true,
-ForceNew: true,
+ForceNew: false,
ValidateFunc: func(value interface{}, key string) ([]string, []error) {
v, err := strconv.Atoi(value.(string))
if err != nil {
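A note on the one-line change above: in the Terraform plugin SDK, `ForceNew: true` plans a destroy-and-recreate whenever the attribute changes, while `ForceNew: false` lets the change apply as an in-place update. A minimal sketch of the restored attribute (field values taken from the diff; the variable name is illustrative only):

```go
package kubernetes

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// ttlSecondsAfterFinishedSketch stands in for the entry in jobSpecFields.
// With ForceNew restored to false, editing ttl_seconds_after_finished in a
// config updates the Job in place instead of replacing the whole
// kubernetes_job_v1 resource.
var ttlSecondsAfterFinishedSketch = &schema.Schema{
	Type:     schema.TypeString,
	Optional: true,
	ForceNew: false,
}
```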
